From 7cbe0c76ef8fd2aabf9f07de0156941ce3ed44b0 Mon Sep 17 00:00:00 2001
From: Chris Sosnin <48099298+slumber@users.noreply.github.com>
Date: Wed, 27 Sep 2023 13:32:02 +0300
Subject: [PATCH] Migrate polkadot-primitives to v6 (#1543)

- Async-backing related primitives are now stable under `primitives::v6` (see the import sketch below)
- The async-backing API is now part of `api_version(7)`
- It is enabled on the Rococo and Westend runtimes
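
For downstream crates, the practical effect is a rename of the import path: types that previously lived under `polkadot_primitives::vstaging` are now exposed via the stable `async_backing` module and re-exported at the crate root. A minimal sketch, assuming the updated `polkadot-primitives` crate is available as a dependency:

```rust
// Minimal sketch: constructing the renamed async-backing parameters.
// Before this patch: `use polkadot_primitives::vstaging::AsyncBackingParams;`
// After this patch the type is stable and re-exported at the crate root.
use polkadot_primitives::AsyncBackingParams;

fn main() {
    // Field names match the defaults set elsewhere in this patch
    // (e.g. in `cumulus/pallets/parachain-system`).
    let params = AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 };
    println!(
        "max_candidate_depth={}, allowed_ancestry_len={}",
        params.max_candidate_depth, params.allowed_ancestry_len
    );
}
```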

---------

Signed-off-by: Andrei Sandu <andrei-mihail@parity.io>
Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com>
---
 .gitlab/pipeline/zombienet/polkadot.yml       |   30 +-
 .../chain-bridge-hub-cumulus/src/lib.rs       |    2 +-
 .../Cargo.toml                                |    3 -
 .../relay-chain-minimal-node/Cargo.toml       |    4 -
 .../src/blockchain_rpc_client.rs              |   10 +-
 .../src/collator_overseer.rs                  |   11 +-
 .../relay-chain-minimal-node/src/lib.rs       |   12 +-
 .../src/rpc_client.rs                         |   25 +-
 cumulus/client/service/Cargo.toml             |    5 -
 cumulus/pallets/parachain-system/src/lib.rs   |    2 +-
 cumulus/parachain-template/node/Cargo.toml    |    5 +-
 .../emulated/common/src/lib.rs                |    6 +-
 cumulus/test/relay-sproof-builder/src/lib.rs  |    2 +-
 polkadot/Cargo.toml                           |    1 -
 polkadot/cli/Cargo.toml                       |    1 -
 polkadot/node/collation-generation/src/lib.rs |    7 +-
 .../node/collation-generation/src/tests.rs    |    8 +-
 polkadot/node/core/backing/src/tests/mod.rs   |    2 +-
 .../src/tests/prospective_parachains.rs       |    8 +-
 .../dispute-coordinator/src/initialized.rs    |   12 +-
 .../src/fragment_tree.rs                      |   10 +-
 .../core/prospective-parachains/src/lib.rs    |   13 +-
 .../core/prospective-parachains/src/tests.rs  |   12 +-
 polkadot/node/core/runtime-api/src/cache.rs   |   74 +-
 polkadot/node/core/runtime-api/src/lib.rs     |   33 +-
 polkadot/node/core/runtime-api/src/tests.rs   |   22 +-
 .../network/approval-distribution/src/lib.rs  |   48 +-
 .../approval-distribution/src/tests.rs        |   16 +-
 .../network/bitfield-distribution/src/lib.rs  |   19 +-
 .../bitfield-distribution/src/tests.rs        |   10 +-
 polkadot/node/network/bridge/src/rx/mod.rs    |   44 +-
 polkadot/node/network/bridge/src/rx/tests.rs  |   29 +-
 polkadot/node/network/bridge/src/tx/mod.rs    |   28 +-
 polkadot/node/network/bridge/src/tx/tests.rs  |   19 +-
 .../src/collator_side/collation.rs            |   21 +-
 .../src/collator_side/mod.rs                  |   52 +-
 .../src/collator_side/tests/mod.rs            |   49 +-
 .../tests/prospective_parachains.rs           |   39 +-
 .../node/network/collator-protocol/src/lib.rs |   11 +-
 .../src/validator_side/collation.rs           |    2 +-
 .../src/validator_side/mod.rs                 |   44 +-
 .../src/validator_side/tests/mod.rs           |   25 +-
 .../tests/prospective_parachains.rs           |   36 +-
 .../node/network/gossip-support/src/lib.rs    |    2 +-
 polkadot/node/network/protocol/Cargo.toml     |    3 -
 polkadot/node/network/protocol/src/lib.rs     |   62 +-
 .../node/network/protocol/src/peer_set.rs     |   23 +-
 .../protocol/src/request_response/mod.rs      |   24 +-
 .../protocol/src/request_response/outgoing.rs |   14 +-
 .../request_response/{vstaging.rs => v2.rs}   |    8 +-
 .../src/legacy_v1/mod.rs                      |   30 +-
 .../src/legacy_v1/tests.rs                    |   12 +-
 .../network/statement-distribution/src/lib.rs |   55 +-
 .../src/{vstaging => v2}/candidates.rs        |    2 +-
 .../src/{vstaging => v2}/cluster.rs           |    4 +-
 .../src/{vstaging => v2}/grid.rs              |    8 +-
 .../src/{vstaging => v2}/groups.rs            |    3 +-
 .../src/{vstaging => v2}/mod.rs               |   69 +-
 .../src/{vstaging => v2}/requests.rs          |    8 +-
 .../src/{vstaging => v2}/statement_store.rs   |    4 +-
 .../src/{vstaging => v2}/tests/cluster.rs     |   46 +-
 .../src/{vstaging => v2}/tests/grid.rs        |   86 +-
 .../src/{vstaging => v2}/tests/mod.rs         |   12 +-
 .../src/{vstaging => v2}/tests/requests.rs    |   86 +-
 polkadot/node/service/Cargo.toml              |    4 -
 polkadot/node/service/src/lib.rs              |    8 +-
 polkadot/node/service/src/overseer.rs         |   18 +-
 polkadot/node/subsystem-types/src/messages.rs |   24 +-
 .../subsystem-types/src/runtime_client.rs     |   46 +-
 .../src/backing_implicit_view.rs              |    2 +-
 .../src/inclusion_emulator/mod.rs             | 1435 +++++++++++++++-
 .../src/inclusion_emulator/staging.rs         | 1450 -----------------
 polkadot/node/subsystem-util/src/lib.rs       |    4 +-
 .../node/subsystem-util/src/runtime/mod.rs    |   29 +-
 .../test-parachains/adder/collator/Cargo.toml |    3 -
 polkadot/primitives/src/lib.rs                |   31 +-
 polkadot/primitives/src/runtime_api.rs        |   30 +-
 polkadot/primitives/src/v6/async_backing.rs   |  132 ++
 .../src/{v5 => v6}/executor_params.rs         |    0
 polkadot/primitives/src/{v5 => v6}/metrics.rs |    0
 polkadot/primitives/src/{v5 => v6}/mod.rs     |   12 +-
 polkadot/primitives/src/{v5 => v6}/signed.rs  |    0
 .../primitives/src/{v5 => v6}/slashing.rs     |    0
 polkadot/primitives/src/vstaging/mod.rs       |  118 --
 .../node/backing/prospective-parachains.md    |    2 +-
 polkadot/runtime/kusama/src/lib.rs            |    2 +-
 .../src/assigner_on_demand/tests.rs           |    2 +-
 .../runtime/parachains/src/configuration.rs   |    2 +-
 .../src/configuration/migration/v6.rs         |    2 +-
 .../src/configuration/migration/v7.rs         |    2 +-
 .../src/configuration/migration/v8.rs         |    3 +-
 .../parachains/src/disputes/slashing.rs       |    2 +-
 .../parachains/src/runtime_api_impl/mod.rs    |    3 +-
 .../src/runtime_api_impl/{v5.rs => v7.rs}     |  103 +-
 .../src/runtime_api_impl/vstaging.rs          |  108 --
 polkadot/runtime/polkadot/src/lib.rs          |    2 +-
 polkadot/runtime/rococo/src/lib.rs            |   15 +-
 polkadot/runtime/test-runtime/src/lib.rs      |    2 +-
 polkadot/runtime/westend/src/lib.rs           |   16 +-
 .../001-async-backing-compatibility.toml      |   34 -
 .../001-async-backing-compatibility.zndsl     |   23 -
 .../002-async-backing-runtime-upgrade.toml    |   54 -
 .../002-async-backing-runtime-upgrade.zndsl   |   34 -
 .../003-async-backing-collator-mix.toml       |   40 -
 .../003-async-backing-collator-mix.zndsl      |   19 -
 .../zombienet_tests/async_backing/README.md   |    9 -
 .../0002-parachains-upgrade-smoke-test.toml   |    4 +-
 107 files changed, 2410 insertions(+), 2792 deletions(-)
 rename polkadot/node/network/protocol/src/request_response/{vstaging.rs => v2.rs} (93%)
 rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/candidates.rs (99%)
 rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/cluster.rs (99%)
 rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/grid.rs (99%)
 rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/groups.rs (96%)
 rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/mod.rs (97%)
 rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/requests.rs (99%)
 rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/statement_store.rs (98%)
 rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/tests/cluster.rs (95%)
 rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/tests/grid.rs (95%)
 rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/tests/mod.rs (98%)
 rename polkadot/node/network/statement-distribution/src/{vstaging => v2}/tests/requests.rs (95%)
 delete mode 100644 polkadot/node/subsystem-util/src/inclusion_emulator/staging.rs
 create mode 100644 polkadot/primitives/src/v6/async_backing.rs
 rename polkadot/primitives/src/{v5 => v6}/executor_params.rs (100%)
 rename polkadot/primitives/src/{v5 => v6}/metrics.rs (100%)
 rename polkadot/primitives/src/{v5 => v6}/mod.rs (99%)
 rename polkadot/primitives/src/{v5 => v6}/signed.rs (100%)
 rename polkadot/primitives/src/{v5 => v6}/slashing.rs (100%)
 rename polkadot/runtime/parachains/src/runtime_api_impl/{v5.rs => v7.rs} (79%)
 delete mode 100644 polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.toml
 delete mode 100644 polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.zndsl
 delete mode 100644 polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.toml
 delete mode 100644 polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.zndsl
 delete mode 100644 polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.toml
 delete mode 100644 polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.zndsl
 delete mode 100644 polkadot/zombienet_tests/async_backing/README.md

diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml
index e420baf486a..0402c194134 100644
--- a/.gitlab/pipeline/zombienet/polkadot.yml
+++ b/.gitlab/pipeline/zombienet/polkadot.yml
@@ -110,7 +110,7 @@ zombienet-polkadot-smoke-0001-parachains-smoke-test:
     - .zombienet-polkadot-common
   before_script:
     - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG}
-    - export COL_IMAGE="docker.io/paritypr/colander:4519" # The collator image is fixed
+    - export COL_IMAGE="${COLANDER_IMAGE}":${PIPELINE_IMAGE_TAG}
     - echo "Zombienet Tests Config"
     - echo "gh-dir ${GH_DIR}"
     - echo "local-dir ${LOCAL_DIR}"
@@ -127,12 +127,12 @@ zombienet-polkadot-smoke-0002-parachains-parachains-upgrade-smoke:
     - .zombienet-polkadot-common
   before_script:
     - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG}
-    - export COL_IMAGE="docker.io/parity/polkadot-collator:latest" # Use cumulus lastest image
+    - export CUMULUS_IMAGE="docker.io/paritypr/polkadot-parachain-debug:${DOCKER_IMAGES_VERSION}"
     - echo "Zombienet Tests Config"
     - echo "gh-dir ${GH_DIR}"
     - echo "local-dir ${LOCAL_DIR}"
     - echo "polkadot image ${ZOMBIENET_INTEGRATION_TEST_IMAGE}"
-    - echo "colander image ${COL_IMAGE}"
+    - echo "polkadot-parachain image ${CUMULUS_IMAGE}"
     - echo "malus image ${MALUS_IMAGE}"
   script:
     - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
@@ -193,27 +193,3 @@ zombienet-polkadot-malus-0001-dispute-valid:
     - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
       --local-dir="${LOCAL_DIR}/integrationtests"
       --test="0001-dispute-valid-block.zndsl"
-
-zombienet-polkadot-async-backing-compatibility:
-  extends:
-    - .zombienet-polkadot-common
-  script:
-    - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
-      --local-dir="${LOCAL_DIR}/async_backing"
-      --test="001-async-backing-compatibility.zndsl"
-
-zombienet-polkadot-async-backing-runtime-upgrade:
-  extends:
-    - .zombienet-polkadot-common
-  script:
-    - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
-      --local-dir="${LOCAL_DIR}/async_backing"
-      --test="002-async-backing-runtime-upgrade.zndsl"
-
-zombienet-polkadot-async-backing-collator-mix:
-  extends:
-    - .zombienet-polkadot-common
-  script:
-    - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
-      --local-dir="${LOCAL_DIR}/async_backing"
-      --test="003-async-backing-collator-mix.zndsl"
diff --git a/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs b/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs
index c1dbc6db36f..cd281324ee5 100644
--- a/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs
+++ b/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs
@@ -52,7 +52,7 @@ pub const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75);
 /// This is a copy-paste from the cumulus repo's `parachains-common` crate.
 const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts(constants::WEIGHT_REF_TIME_PER_SECOND, 0)
 	.saturating_div(2)
-	.set_proof_size(polkadot_primitives::v5::MAX_POV_SIZE as u64);
+	.set_proof_size(polkadot_primitives::MAX_POV_SIZE as u64);
 
 /// All cumulus bridge hubs assume that about 5 percent of the block weight is consumed by
 /// `on_initialize` handlers. This is used to limit the maximal weight of a single extrinsic.
diff --git a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml
index 39eda5075e2..bc8d0d430c7 100644
--- a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml
+++ b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml
@@ -41,6 +41,3 @@ metered = { package = "prioritized-metered-channel", version = "0.5.1", default-
 
 # Cumulus
 cumulus-test-service = { path = "../../test/service" }
-
-[features]
-network-protocol-staging = [ "polkadot-service/network-protocol-staging" ]
diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml
index 39056d6b651..226474d3d38 100644
--- a/cumulus/client/relay-chain-minimal-node/Cargo.toml
+++ b/cumulus/client/relay-chain-minimal-node/Cargo.toml
@@ -41,7 +41,3 @@ tracing = "0.1.37"
 async-trait = "0.1.73"
 futures = "0.3.28"
 
-[features]
-network-protocol-staging = [
-	"polkadot-node-network-protocol/network-protocol-staging",
-]
diff --git a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs
index 57e16bc4283..3f4c08ecbb8 100644
--- a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs
+++ b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs
@@ -22,8 +22,8 @@ use futures::{Stream, StreamExt};
 use polkadot_core_primitives::{Block, BlockNumber, Hash, Header};
 use polkadot_overseer::RuntimeApiSubsystemClient;
 use polkadot_primitives::{
+	async_backing::{AsyncBackingParams, BackingState},
 	slashing,
-	vstaging::{AsyncBackingParams, BackingState},
 };
 use sc_authority_discovery::{AuthorityDiscovery, Error as AuthorityDiscoveryError};
 use sp_api::{ApiError, RuntimeApiInfo};
@@ -346,16 +346,16 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient {
 		Ok(self.rpc_client.parachain_host_minimum_backing_votes(at, session_index).await?)
 	}
 
-	async fn staging_async_backing_params(&self, at: Hash) -> Result<AsyncBackingParams, ApiError> {
-		Ok(self.rpc_client.parachain_host_staging_async_backing_params(at).await?)
+	async fn async_backing_params(&self, at: Hash) -> Result<AsyncBackingParams, ApiError> {
+		Ok(self.rpc_client.parachain_host_async_backing_params(at).await?)
 	}
 
-	async fn staging_para_backing_state(
+	async fn para_backing_state(
 		&self,
 		at: Hash,
 		para_id: cumulus_primitives_core::ParaId,
 	) -> Result<Option<BackingState>, ApiError> {
-		Ok(self.rpc_client.parachain_host_staging_para_backing_state(at, para_id).await?)
+		Ok(self.rpc_client.parachain_host_para_backing_state(at, para_id).await?)
 	}
 }
 
diff --git a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs
index bea2fc330a2..a83a18f7cd9 100644
--- a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs
+++ b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs
@@ -30,7 +30,7 @@ use polkadot_node_network_protocol::{
 	peer_set::PeerSetProtocolNames,
 	request_response::{
 		v1::{self, AvailableDataFetchingRequest},
-		vstaging, IncomingRequestReceiver, ReqProtocolNames,
+		v2, IncomingRequestReceiver, ReqProtocolNames,
 	},
 };
 use polkadot_node_subsystem_util::metrics::{prometheus::Registry, Metrics};
@@ -63,9 +63,8 @@ pub(crate) struct CollatorOverseerGenArgs<'a> {
 	pub authority_discovery_service: AuthorityDiscoveryService,
 	/// Receiver for collation request protocol v1.
 	pub collation_req_receiver_v1: IncomingRequestReceiver<v1::CollationFetchingRequest>,
-	/// Receiver for collation request protocol vstaging.
-	pub collation_req_receiver_vstaging:
-		IncomingRequestReceiver<vstaging::CollationFetchingRequest>,
+	/// Receiver for collation request protocol v2.
+	pub collation_req_receiver_v2: IncomingRequestReceiver<v2::CollationFetchingRequest>,
 	/// Receiver for availability request protocol
 	pub available_data_req_receiver: IncomingRequestReceiver<AvailableDataFetchingRequest>,
 	/// Prometheus registry, commonly used for production systems, less so for test.
@@ -88,7 +87,7 @@ fn build_overseer(
 		sync_oracle,
 		authority_discovery_service,
 		collation_req_receiver_v1,
-		collation_req_receiver_vstaging,
+		collation_req_receiver_v2,
 		available_data_req_receiver,
 		registry,
 		spawner,
@@ -121,7 +120,7 @@ fn build_overseer(
 				peer_id: network_service.local_peer_id(),
 				collator_pair,
 				request_receiver_v1: collation_req_receiver_v1,
-				request_receiver_vstaging: collation_req_receiver_vstaging,
+				request_receiver_v2: collation_req_receiver_v2,
 				metrics: Metrics::register(registry)?,
 			};
 			CollatorProtocolSubsystem::new(side)
diff --git a/cumulus/client/relay-chain-minimal-node/src/lib.rs b/cumulus/client/relay-chain-minimal-node/src/lib.rs
index 366d428eda7..08e4e8e34ab 100644
--- a/cumulus/client/relay-chain-minimal-node/src/lib.rs
+++ b/cumulus/client/relay-chain-minimal-node/src/lib.rs
@@ -23,7 +23,7 @@ use polkadot_network_bridge::{peer_sets_info, IsAuthority};
 use polkadot_node_network_protocol::{
 	peer_set::PeerSetProtocolNames,
 	request_response::{
-		v1, vstaging, IncomingRequest, IncomingRequestReceiver, Protocol, ReqProtocolNames,
+		v1, v2, IncomingRequest, IncomingRequestReceiver, Protocol, ReqProtocolNames,
 	},
 };
 
@@ -182,7 +182,7 @@ async fn new_minimal_relay_chain(
 	}
 
 	let request_protocol_names = ReqProtocolNames::new(genesis_hash, config.chain_spec.fork_id());
-	let (collation_req_receiver_v1, collation_req_receiver_vstaging, available_data_req_receiver) =
+	let (collation_req_receiver_v1, collation_req_receiver_v2, available_data_req_receiver) =
 		build_request_response_protocol_receivers(&request_protocol_names, &mut net_config);
 
 	let best_header = relay_chain_rpc_client
@@ -212,7 +212,7 @@ async fn new_minimal_relay_chain(
 		sync_oracle,
 		authority_discovery_service,
 		collation_req_receiver_v1,
-		collation_req_receiver_vstaging,
+		collation_req_receiver_v2,
 		available_data_req_receiver,
 		registry: prometheus_registry.as_ref(),
 		spawner: task_manager.spawn_handle(),
@@ -234,13 +234,13 @@ fn build_request_response_protocol_receivers(
 	config: &mut FullNetworkConfiguration,
 ) -> (
 	IncomingRequestReceiver<v1::CollationFetchingRequest>,
-	IncomingRequestReceiver<vstaging::CollationFetchingRequest>,
+	IncomingRequestReceiver<v2::CollationFetchingRequest>,
 	IncomingRequestReceiver<v1::AvailableDataFetchingRequest>,
 ) {
 	let (collation_req_receiver_v1, cfg) =
 		IncomingRequest::get_config_receiver(request_protocol_names);
 	config.add_request_response_protocol(cfg);
-	let (collation_req_receiver_vstaging, cfg) =
+	let (collation_req_receiver_v2, cfg) =
 		IncomingRequest::get_config_receiver(request_protocol_names);
 	config.add_request_response_protocol(cfg);
 	let (available_data_req_receiver, cfg) =
@@ -248,5 +248,5 @@ fn build_request_response_protocol_receivers(
 	config.add_request_response_protocol(cfg);
 	let cfg = Protocol::ChunkFetchingV1.get_outbound_only_config(request_protocol_names);
 	config.add_request_response_protocol(cfg);
-	(collation_req_receiver_v1, collation_req_receiver_vstaging, available_data_req_receiver)
+	(collation_req_receiver_v1, collation_req_receiver_v2, available_data_req_receiver)
 }
diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs
index c1e92b249d7..b1fd7d1ab7d 100644
--- a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs
+++ b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs
@@ -30,9 +30,8 @@ use parity_scale_codec::{Decode, Encode};
 
 use cumulus_primitives_core::{
 	relay_chain::{
-		slashing,
-		vstaging::{AsyncBackingParams, BackingState},
-		BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash,
+		async_backing::{AsyncBackingParams, BackingState},
+		slashing, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash,
 		CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo,
 		Hash as RelayHash, Header as RelayHeader, InboundHrmpMessage, OccupiedCoreAssumption,
 		PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode,
@@ -599,30 +598,22 @@ impl RelayChainRpcClient {
 	}
 
 	#[allow(missing_docs)]
-	pub async fn parachain_host_staging_async_backing_params(
+	pub async fn parachain_host_async_backing_params(
 		&self,
 		at: RelayHash,
 	) -> Result<AsyncBackingParams, RelayChainError> {
-		self.call_remote_runtime_function(
-			"ParachainHost_staging_async_backing_params",
-			at,
-			None::<()>,
-		)
-		.await
+		self.call_remote_runtime_function("ParachainHost_async_backing_params", at, None::<()>)
+			.await
 	}
 
 	#[allow(missing_docs)]
-	pub async fn parachain_host_staging_para_backing_state(
+	pub async fn parachain_host_para_backing_state(
 		&self,
 		at: RelayHash,
 		para_id: ParaId,
 	) -> Result<Option<BackingState>, RelayChainError> {
-		self.call_remote_runtime_function(
-			"ParachainHost_staging_para_backing_state",
-			at,
-			Some(para_id),
-		)
-		.await
+		self.call_remote_runtime_function("ParachainHost_para_backing_state", at, Some(para_id))
+			.await
 	}
 
 	fn send_register_message_to_worker(
diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml
index b53bdbdfc81..b7c274ceecd 100644
--- a/cumulus/client/service/Cargo.toml
+++ b/cumulus/client/service/Cargo.toml
@@ -40,8 +40,3 @@ cumulus-relay-chain-interface = { path = "../relay-chain-interface" }
 cumulus-relay-chain-inprocess-interface = { path = "../relay-chain-inprocess-interface" }
 cumulus-relay-chain-minimal-node = { path = "../relay-chain-minimal-node" }
 
-[features]
-network-protocol-staging = [
-	"cumulus-relay-chain-inprocess-interface/network-protocol-staging",
-	"cumulus-relay-chain-minimal-node/network-protocol-staging",
-]
diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs
index a7e59a61c9b..a8f0a49223f 100644
--- a/cumulus/pallets/parachain-system/src/lib.rs
+++ b/cumulus/pallets/parachain-system/src/lib.rs
@@ -1447,7 +1447,7 @@ impl<T: Config> Pallet<T> {
 			hrmp_max_message_num_per_candidate: 2,
 			validation_upgrade_cooldown: 2,
 			validation_upgrade_delay: 2,
-			async_backing_params: relay_chain::vstaging::AsyncBackingParams {
+			async_backing_params: relay_chain::AsyncBackingParams {
 				allowed_ancestry_len: 0,
 				max_candidate_depth: 0,
 			},
diff --git a/cumulus/parachain-template/node/Cargo.toml b/cumulus/parachain-template/node/Cargo.toml
index 223a78dacc4..114b25d1261 100644
--- a/cumulus/parachain-template/node/Cargo.toml
+++ b/cumulus/parachain-template/node/Cargo.toml
@@ -89,7 +89,4 @@ try-runtime = [
 	"polkadot-cli/try-runtime",
 	"sp-runtime/try-runtime",
 ]
-network-protocol-staging = [
-	"cumulus-client-service/network-protocol-staging",
-	"polkadot-cli/network-protocol-staging",
-]
+
diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs
index 7461165f2a1..2804128ec01 100644
--- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs
+++ b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs
@@ -64,7 +64,7 @@ decl_test_relay_chains! {
 			Hrmp: kusama_runtime::Hrmp,
 		}
 	},
-	#[api_version(6)]
+	#[api_version(7)]
 	pub struct Westend {
 		genesis = westend::genesis(),
 		on_init = (),
@@ -79,7 +79,7 @@ decl_test_relay_chains! {
 			Balances: westend_runtime::Balances,
 		}
 	},
-	#[api_version(5)]
+	#[api_version(7)]
 	pub struct Rococo {
 		genesis = rococo::genesis(),
 		on_init = (),
@@ -94,7 +94,7 @@ decl_test_relay_chains! {
 			Balances: rococo_runtime::Balances,
 		}
 	},
-	#[api_version(5)]
+	#[api_version(7)]
 	pub struct Wococo {
 		genesis = rococo::genesis(),
 		on_init = (),
diff --git a/cumulus/test/relay-sproof-builder/src/lib.rs b/cumulus/test/relay-sproof-builder/src/lib.rs
index 69a82d05d81..fbd2692a36b 100644
--- a/cumulus/test/relay-sproof-builder/src/lib.rs
+++ b/cumulus/test/relay-sproof-builder/src/lib.rs
@@ -63,7 +63,7 @@ impl Default for RelayStateSproofBuilder {
 				hrmp_max_message_num_per_candidate: 5,
 				validation_upgrade_cooldown: 6,
 				validation_upgrade_delay: 6,
-				async_backing_params: relay_chain::vstaging::AsyncBackingParams {
+				async_backing_params: relay_chain::AsyncBackingParams {
 					allowed_ancestry_len: 0,
 					max_candidate_depth: 0,
 				},
diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml
index aacc6ad405c..6e82cb69f6e 100644
--- a/polkadot/Cargo.toml
+++ b/polkadot/Cargo.toml
@@ -68,7 +68,6 @@ jemalloc-allocator = [
 # Enables timeout-based tests supposed to be run only in CI environment as they may be flaky
 # when run locally depending on system load
 ci-only-tests = [ "polkadot-node-core-pvf/ci-only-tests" ]
-network-protocol-staging = [ "polkadot-cli/network-protocol-staging" ]
 
 # Configuration for building a .deb package - for use with `cargo-deb`
 [package.metadata.deb]
diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml
index 53961c90a2a..799a229b6ad 100644
--- a/polkadot/cli/Cargo.toml
+++ b/polkadot/cli/Cargo.toml
@@ -76,4 +76,3 @@ runtime-metrics = [
 	"polkadot-node-metrics/runtime-metrics",
 	"service/runtime-metrics",
 ]
-network-protocol-staging = [ "service/network-protocol-staging" ]
diff --git a/polkadot/node/collation-generation/src/lib.rs b/polkadot/node/collation-generation/src/lib.rs
index 27779f3d1ac..4e13755deed 100644
--- a/polkadot/node/collation-generation/src/lib.rs
+++ b/polkadot/node/collation-generation/src/lib.rs
@@ -43,9 +43,8 @@ use polkadot_node_subsystem::{
 	SubsystemContext, SubsystemError, SubsystemResult,
 };
 use polkadot_node_subsystem_util::{
-	request_availability_cores, request_persisted_validation_data,
-	request_staging_async_backing_params, request_validation_code, request_validation_code_hash,
-	request_validators,
+	request_async_backing_params, request_availability_cores, request_persisted_validation_data,
+	request_validation_code, request_validation_code_hash, request_validators,
 };
 use polkadot_primitives::{
 	collator_signature_payload, CandidateCommitments, CandidateDescriptor, CandidateReceipt,
@@ -208,7 +207,7 @@ async fn handle_new_activations<Context>(
 		let (availability_cores, validators, async_backing_params) = join!(
 			request_availability_cores(relay_parent, ctx.sender()).await,
 			request_validators(relay_parent, ctx.sender()).await,
-			request_staging_async_backing_params(relay_parent, ctx.sender()).await,
+			request_async_backing_params(relay_parent, ctx.sender()).await,
 		);
 
 		let availability_cores = availability_cores??;
diff --git a/polkadot/node/collation-generation/src/tests.rs b/polkadot/node/collation-generation/src/tests.rs
index da6b343e6ae..9094f40cca8 100644
--- a/polkadot/node/collation-generation/src/tests.rs
+++ b/polkadot/node/collation-generation/src/tests.rs
@@ -153,7 +153,7 @@ fn requests_availability_per_relay_parent() {
 				}
 				Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
 					_hash,
-					RuntimeApiRequest::StagingAsyncBackingParams(
+					RuntimeApiRequest::AsyncBackingParams(
 						tx,
 					),
 				))) => {
@@ -235,7 +235,7 @@ fn requests_validation_data_for_scheduled_matches() {
 				},
 				Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
 					_hash,
-					RuntimeApiRequest::StagingAsyncBackingParams(tx),
+					RuntimeApiRequest::AsyncBackingParams(tx),
 				))) => {
 					tx.send(Err(RuntimeApiError::NotSupported {
 						runtime_api_name: "doesnt_matter",
@@ -332,7 +332,7 @@ fn sends_distribute_collation_message() {
 				},
 				Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
 					_hash,
-					RuntimeApiRequest::StagingAsyncBackingParams(tx),
+					RuntimeApiRequest::AsyncBackingParams(tx),
 				))) => {
 					tx.send(Err(RuntimeApiError::NotSupported {
 						runtime_api_name: "doesnt_matter",
@@ -494,7 +494,7 @@ fn fallback_when_no_validation_code_hash_api() {
 				},
 				Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
 					_hash,
-					RuntimeApiRequest::StagingAsyncBackingParams(tx),
+					RuntimeApiRequest::AsyncBackingParams(tx),
 				))) => {
 					tx.send(Err(RuntimeApiError::NotSupported {
 						runtime_api_name: "doesnt_matter",
diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs
index 4c2fd6becb4..bdc8b3fa1af 100644
--- a/polkadot/node/core/backing/src/tests/mod.rs
+++ b/polkadot/node/core/backing/src/tests/mod.rs
@@ -237,7 +237,7 @@ async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestS
 	assert_matches!(
 		virtual_overseer.recv().await,
 		AllMessages::RuntimeApi(
-			RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx))
+			RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx))
 		) if parent == test_state.relay_parent => {
 			tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap();
 		}
diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs
index 14f720b721f..b79515ed37a 100644
--- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs
+++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs
@@ -20,12 +20,12 @@ use polkadot_node_subsystem::{
 	messages::{ChainApiMessage, FragmentTreeMembership},
 	ActivatedLeaf, TimeoutExt,
 };
-use polkadot_primitives::{vstaging as vstaging_primitives, BlockNumber, Header, OccupiedCore};
+use polkadot_primitives::{AsyncBackingParams, BlockNumber, Header, OccupiedCore};
 
 use super::*;
 
-const ASYNC_BACKING_PARAMETERS: vstaging_primitives::AsyncBackingParams =
-	vstaging_primitives::AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 };
+const ASYNC_BACKING_PARAMETERS: AsyncBackingParams =
+	AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 };
 
 struct TestLeaf {
 	activated: ActivatedLeaf,
@@ -56,7 +56,7 @@ async fn activate_leaf(
 	assert_matches!(
 		virtual_overseer.recv().await,
 		AllMessages::RuntimeApi(
-			RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx))
+			RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx))
 		) if parent == leaf_hash => {
 			tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap();
 		}
diff --git a/polkadot/node/core/dispute-coordinator/src/initialized.rs b/polkadot/node/core/dispute-coordinator/src/initialized.rs
index 9cd544a8c53..e44530b3f1b 100644
--- a/polkadot/node/core/dispute-coordinator/src/initialized.rs
+++ b/polkadot/node/core/dispute-coordinator/src/initialized.rs
@@ -43,7 +43,7 @@ use polkadot_node_subsystem_util::runtime::{
 	self, key_ownership_proof, submit_report_dispute_lost, RuntimeInfo,
 };
 use polkadot_primitives::{
-	vstaging, BlockNumber, CandidateHash, CandidateReceipt, CompactStatement, DisputeStatement,
+	slashing, BlockNumber, CandidateHash, CandidateReceipt, CompactStatement, DisputeStatement,
 	DisputeStatementSet, Hash, ScrapedOnChainVotes, SessionIndex, ValidDisputeStatementKind,
 	ValidatorId, ValidatorIndex,
 };
@@ -385,7 +385,7 @@ impl Initialized {
 		&mut self,
 		ctx: &mut Context,
 		relay_parent: Hash,
-		unapplied_slashes: Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>,
+		unapplied_slashes: Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>,
 	) {
 		for (session_index, candidate_hash, pending) in unapplied_slashes {
 			gum::info!(
@@ -422,11 +422,9 @@ impl Initialized {
 					match res {
 						Ok(Some(key_ownership_proof)) => {
 							key_ownership_proofs.push(key_ownership_proof);
-							let time_slot = vstaging::slashing::DisputesTimeSlot::new(
-								session_index,
-								candidate_hash,
-							);
-							let dispute_proof = vstaging::slashing::DisputeProof {
+							let time_slot =
+								slashing::DisputesTimeSlot::new(session_index, candidate_hash);
+							let dispute_proof = slashing::DisputeProof {
 								time_slot,
 								kind: pending.kind,
 								validator_index: *validator_index,
diff --git a/polkadot/node/core/prospective-parachains/src/fragment_tree.rs b/polkadot/node/core/prospective-parachains/src/fragment_tree.rs
index ed2988fcb39..292e4ebe528 100644
--- a/polkadot/node/core/prospective-parachains/src/fragment_tree.rs
+++ b/polkadot/node/core/prospective-parachains/src/fragment_tree.rs
@@ -96,10 +96,10 @@ use std::{
 
 use super::LOG_TARGET;
 use bitvec::prelude::*;
-use polkadot_node_subsystem_util::inclusion_emulator::staging::{
+use polkadot_node_subsystem_util::inclusion_emulator::{
 	ConstraintModifications, Constraints, Fragment, ProspectiveCandidate, RelayChainBlockInfo,
 };
-use polkadot_primitives::vstaging::{
+use polkadot_primitives::{
 	BlockNumber, CandidateHash, CommittedCandidateReceipt, Hash, HeadData, Id as ParaId,
 	PersistedValidationData,
 };
@@ -981,10 +981,8 @@ impl FragmentNode {
 mod tests {
 	use super::*;
 	use assert_matches::assert_matches;
-	use polkadot_node_subsystem_util::inclusion_emulator::staging::InboundHrmpLimitations;
-	use polkadot_primitives::vstaging::{
-		BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData,
-	};
+	use polkadot_node_subsystem_util::inclusion_emulator::InboundHrmpLimitations;
+	use polkadot_primitives::{BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData};
 	use polkadot_primitives_test_helpers as test_helpers;
 
 	fn make_constraints(
diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs
index 6e5844a62a1..fcca0dd0b53 100644
--- a/polkadot/node/core/prospective-parachains/src/lib.rs
+++ b/polkadot/node/core/prospective-parachains/src/lib.rs
@@ -22,7 +22,7 @@
 //! backing phases of parachain consensus.
 //!
 //! This is primarily an implementation of "Fragment Trees", as described in
-//! [`polkadot_node_subsystem_util::inclusion_emulator::staging`].
+//! [`polkadot_node_subsystem_util::inclusion_emulator`].
 //!
 //! This subsystem also handles concerns such as the relay-chain being forkful and session changes.
 
@@ -42,13 +42,14 @@ use polkadot_node_subsystem::{
 	overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError,
 };
 use polkadot_node_subsystem_util::{
-	inclusion_emulator::staging::{Constraints, RelayChainBlockInfo},
+	inclusion_emulator::{Constraints, RelayChainBlockInfo},
 	request_session_index_for_child,
 	runtime::{prospective_parachains_mode, ProspectiveParachainsMode},
 };
-use polkadot_primitives::vstaging::{
-	BlockNumber, CandidateHash, CandidatePendingAvailability, CommittedCandidateReceipt, CoreState,
-	Hash, HeadData, Header, Id as ParaId, PersistedValidationData,
+use polkadot_primitives::{
+	async_backing::CandidatePendingAvailability, BlockNumber, CandidateHash,
+	CommittedCandidateReceipt, CoreState, Hash, HeadData, Header, Id as ParaId,
+	PersistedValidationData,
 };
 
 use crate::{
@@ -792,7 +793,7 @@ async fn fetch_backing_state<Context>(
 	let (tx, rx) = oneshot::channel();
 	ctx.send_message(RuntimeApiMessage::Request(
 		relay_parent,
-		RuntimeApiRequest::StagingParaBackingState(para_id, tx),
+		RuntimeApiRequest::ParaBackingState(para_id, tx),
 	))
 	.await;
 
diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs
index eb12ea4537f..d2cd23fe95f 100644
--- a/polkadot/node/core/prospective-parachains/src/tests.rs
+++ b/polkadot/node/core/prospective-parachains/src/tests.rs
@@ -25,7 +25,7 @@ use polkadot_node_subsystem::{
 };
 use polkadot_node_subsystem_test_helpers as test_helpers;
 use polkadot_primitives::{
-	vstaging::{AsyncBackingParams, BackingState, Constraints, InboundHrmpLimitations},
+	async_backing::{AsyncBackingParams, BackingState, Constraints, InboundHrmpLimitations},
 	CommittedCandidateReceipt, HeadData, Header, PersistedValidationData, ScheduledCore,
 	ValidationCodeHash,
 };
@@ -219,7 +219,7 @@ async fn handle_leaf_activation(
 	assert_matches!(
 		virtual_overseer.recv().await,
 		AllMessages::RuntimeApi(
-			RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx))
+			RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx))
 		) if parent == *hash => {
 			tx.send(Ok(async_backing_params)).unwrap();
 		}
@@ -284,7 +284,7 @@ async fn handle_leaf_activation(
 		let para_id = match message {
 			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
 				_,
-				RuntimeApiRequest::StagingParaBackingState(p_id, _),
+				RuntimeApiRequest::ParaBackingState(p_id, _),
 			)) => p_id,
 			_ => panic!("received unexpected message {:?}", message),
 		};
@@ -303,7 +303,7 @@ async fn handle_leaf_activation(
 		assert_matches!(
 			message,
 			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingParaBackingState(p_id, tx))
+				RuntimeApiMessage::Request(parent, RuntimeApiRequest::ParaBackingState(p_id, tx))
 			) if parent == *hash && p_id == para_id => {
 				tx.send(Ok(Some(backing_state))).unwrap();
 			}
@@ -499,7 +499,7 @@ fn should_do_no_work_if_async_backing_disabled_for_leaf() {
 		assert_matches!(
 			virtual_overseer.recv().await,
 			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx))
+				RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx))
 			) if parent == hash => {
 				tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap();
 			}
@@ -1569,7 +1569,7 @@ fn uses_ancestry_only_within_session() {
 		assert_matches!(
 			virtual_overseer.recv().await,
 			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx))
+				RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx))
 			) if parent == hash => {
 				tx.send(Ok(AsyncBackingParams { max_candidate_depth: 0, allowed_ancestry_len: ancestry_len })).unwrap();
 			}
diff --git a/polkadot/node/core/runtime-api/src/cache.rs b/polkadot/node/core/runtime-api/src/cache.rs
index 7f41d74e616..e05e5823a28 100644
--- a/polkadot/node/core/runtime-api/src/cache.rs
+++ b/polkadot/node/core/runtime-api/src/cache.rs
@@ -20,12 +20,12 @@ use schnellru::{ByLength, LruMap};
 use sp_consensus_babe::Epoch;
 
 use polkadot_primitives::{
-	vstaging, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent,
-	CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams,
-	GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage,
-	OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes,
-	SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex,
-	ValidatorSignature,
+	async_backing, slashing, AuthorityDiscoveryId, BlockNumber, CandidateCommitments,
+	CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState,
+	ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage,
+	InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement,
+	ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash,
+	ValidatorId, ValidatorIndex, ValidatorSignature,
 };
 
 /// For consistency we have the same capacity for all caches. We use 128 as we'll only need that
@@ -61,14 +61,11 @@ pub(crate) struct RequestResultCache {
 		LruMap<(Hash, ParaId, OccupiedCoreAssumption), Option<ValidationCodeHash>>,
 	version: LruMap<Hash, u32>,
 	disputes: LruMap<Hash, Vec<(SessionIndex, CandidateHash, DisputeState<BlockNumber>)>>,
-	unapplied_slashes:
-		LruMap<Hash, Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>>,
-	key_ownership_proof:
-		LruMap<(Hash, ValidatorId), Option<vstaging::slashing::OpaqueKeyOwnershipProof>>,
+	unapplied_slashes: LruMap<Hash, Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>>,
+	key_ownership_proof: LruMap<(Hash, ValidatorId), Option<slashing::OpaqueKeyOwnershipProof>>,
 	minimum_backing_votes: LruMap<SessionIndex, u32>,
-
-	staging_para_backing_state: LruMap<(Hash, ParaId), Option<vstaging::BackingState>>,
-	staging_async_backing_params: LruMap<Hash, vstaging::AsyncBackingParams>,
+	para_backing_state: LruMap<(Hash, ParaId), Option<async_backing::BackingState>>,
+	async_backing_params: LruMap<Hash, async_backing::AsyncBackingParams>,
 }
 
 impl Default for RequestResultCache {
@@ -100,8 +97,8 @@ impl Default for RequestResultCache {
 			key_ownership_proof: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)),
 			minimum_backing_votes: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)),
 
-			staging_para_backing_state: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)),
-			staging_async_backing_params: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)),
+			para_backing_state: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)),
+			async_backing_params: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)),
 		}
 	}
 }
@@ -401,14 +398,14 @@ impl RequestResultCache {
 	pub(crate) fn unapplied_slashes(
 		&mut self,
 		relay_parent: &Hash,
-	) -> Option<&Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>> {
+	) -> Option<&Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>> {
 		self.unapplied_slashes.get(relay_parent).map(|v| &*v)
 	}
 
 	pub(crate) fn cache_unapplied_slashes(
 		&mut self,
 		relay_parent: Hash,
-		value: Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>,
+		value: Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>,
 	) {
 		self.unapplied_slashes.insert(relay_parent, value);
 	}
@@ -416,14 +413,14 @@ impl RequestResultCache {
 	pub(crate) fn key_ownership_proof(
 		&mut self,
 		key: (Hash, ValidatorId),
-	) -> Option<&Option<vstaging::slashing::OpaqueKeyOwnershipProof>> {
+	) -> Option<&Option<slashing::OpaqueKeyOwnershipProof>> {
 		self.key_ownership_proof.get(&key).map(|v| &*v)
 	}
 
 	pub(crate) fn cache_key_ownership_proof(
 		&mut self,
 		key: (Hash, ValidatorId),
-		value: Option<vstaging::slashing::OpaqueKeyOwnershipProof>,
+		value: Option<slashing::OpaqueKeyOwnershipProof>,
 	) {
 		self.key_ownership_proof.insert(key, value);
 	}
@@ -431,7 +428,7 @@ impl RequestResultCache {
 	// This request is never cached, hence always returns `None`.
 	pub(crate) fn submit_report_dispute_lost(
 		&mut self,
-		_key: (Hash, vstaging::slashing::DisputeProof, vstaging::slashing::OpaqueKeyOwnershipProof),
+		_key: (Hash, slashing::DisputeProof, slashing::OpaqueKeyOwnershipProof),
 	) -> Option<&Option<()>> {
 		None
 	}
@@ -448,34 +445,34 @@ impl RequestResultCache {
 		self.minimum_backing_votes.insert(session_index, minimum_backing_votes);
 	}
 
-	pub(crate) fn staging_para_backing_state(
+	pub(crate) fn para_backing_state(
 		&mut self,
 		key: (Hash, ParaId),
-	) -> Option<&Option<vstaging::BackingState>> {
-		self.staging_para_backing_state.get(&key).map(|v| &*v)
+	) -> Option<&Option<async_backing::BackingState>> {
+		self.para_backing_state.get(&key).map(|v| &*v)
 	}
 
-	pub(crate) fn cache_staging_para_backing_state(
+	pub(crate) fn cache_para_backing_state(
 		&mut self,
 		key: (Hash, ParaId),
-		value: Option<vstaging::BackingState>,
+		value: Option<async_backing::BackingState>,
 	) {
-		self.staging_para_backing_state.insert(key, value);
+		self.para_backing_state.insert(key, value);
 	}
 
-	pub(crate) fn staging_async_backing_params(
+	pub(crate) fn async_backing_params(
 		&mut self,
 		key: &Hash,
-	) -> Option<&vstaging::AsyncBackingParams> {
-		self.staging_async_backing_params.get(key).map(|v| &*v)
+	) -> Option<&async_backing::AsyncBackingParams> {
+		self.async_backing_params.get(key).map(|v| &*v)
 	}
 
-	pub(crate) fn cache_staging_async_backing_params(
+	pub(crate) fn cache_async_backing_params(
 		&mut self,
 		key: Hash,
-		value: vstaging::AsyncBackingParams,
+		value: async_backing::AsyncBackingParams,
 	) {
-		self.staging_async_backing_params.insert(key, value);
+		self.async_backing_params.insert(key, value);
 	}
 }
 
@@ -515,16 +512,15 @@ pub(crate) enum RequestResult {
 	ValidationCodeHash(Hash, ParaId, OccupiedCoreAssumption, Option<ValidationCodeHash>),
 	Version(Hash, u32),
 	Disputes(Hash, Vec<(SessionIndex, CandidateHash, DisputeState<BlockNumber>)>),
-	UnappliedSlashes(Hash, Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>),
-	KeyOwnershipProof(Hash, ValidatorId, Option<vstaging::slashing::OpaqueKeyOwnershipProof>),
+	UnappliedSlashes(Hash, Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>),
+	KeyOwnershipProof(Hash, ValidatorId, Option<slashing::OpaqueKeyOwnershipProof>),
 	// This is a request with side-effects.
 	SubmitReportDisputeLost(
 		Hash,
-		vstaging::slashing::DisputeProof,
-		vstaging::slashing::OpaqueKeyOwnershipProof,
+		slashing::DisputeProof,
+		slashing::OpaqueKeyOwnershipProof,
 		Option<()>,
 	),
-
-	StagingParaBackingState(Hash, ParaId, Option<vstaging::BackingState>),
-	StagingAsyncBackingParams(Hash, vstaging::AsyncBackingParams),
+	ParaBackingState(Hash, ParaId, Option<async_backing::BackingState>),
+	AsyncBackingParams(Hash, async_backing::AsyncBackingParams),
 }
diff --git a/polkadot/node/core/runtime-api/src/lib.rs b/polkadot/node/core/runtime-api/src/lib.rs
index ec9bf10fa6e..19b2f5565a2 100644
--- a/polkadot/node/core/runtime-api/src/lib.rs
+++ b/polkadot/node/core/runtime-api/src/lib.rs
@@ -166,12 +166,11 @@ where
 				.requests_cache
 				.cache_key_ownership_proof((relay_parent, validator_id), key_ownership_proof),
 			SubmitReportDisputeLost(_, _, _, _) => {},
-
-			StagingParaBackingState(relay_parent, para_id, constraints) => self
+			ParaBackingState(relay_parent, para_id, constraints) => self
 				.requests_cache
-				.cache_staging_para_backing_state((relay_parent, para_id), constraints),
-			StagingAsyncBackingParams(relay_parent, params) =>
-				self.requests_cache.cache_staging_async_backing_params(relay_parent, params),
+				.cache_para_backing_state((relay_parent, para_id), constraints),
+			AsyncBackingParams(relay_parent, params) =>
+				self.requests_cache.cache_async_backing_params(relay_parent, params),
 		}
 	}
 
@@ -297,13 +296,10 @@ where
 						Request::SubmitReportDisputeLost(dispute_proof, key_ownership_proof, sender)
 					},
 				),
-
-			Request::StagingParaBackingState(para, sender) =>
-				query!(staging_para_backing_state(para), sender)
-					.map(|sender| Request::StagingParaBackingState(para, sender)),
-			Request::StagingAsyncBackingParams(sender) =>
-				query!(staging_async_backing_params(), sender)
-					.map(|sender| Request::StagingAsyncBackingParams(sender)),
+			Request::ParaBackingState(para, sender) => query!(para_backing_state(para), sender)
+				.map(|sender| Request::ParaBackingState(para, sender)),
+			Request::AsyncBackingParams(sender) => query!(async_backing_params(), sender)
+				.map(|sender| Request::AsyncBackingParams(sender)),
 			Request::MinimumBackingVotes(index, sender) => {
 				if let Some(value) = self.requests_cache.minimum_backing_votes(index) {
 					self.metrics.on_cached_request();
@@ -569,19 +565,18 @@ where
 			ver = Request::MINIMUM_BACKING_VOTES_RUNTIME_REQUIREMENT,
 			sender
 		),
-
-		Request::StagingParaBackingState(para, sender) => {
+		Request::ParaBackingState(para, sender) => {
 			query!(
-				StagingParaBackingState,
-				staging_para_backing_state(para),
+				ParaBackingState,
+				para_backing_state(para),
 				ver = Request::STAGING_BACKING_STATE,
 				sender
 			)
 		},
-		Request::StagingAsyncBackingParams(sender) => {
+		Request::AsyncBackingParams(sender) => {
 			query!(
-				StagingAsyncBackingParams,
-				staging_async_backing_params(),
+				AsyncBackingParams,
+				async_backing_params(),
 				ver = Request::STAGING_BACKING_STATE,
 				sender
 			)
diff --git a/polkadot/node/core/runtime-api/src/tests.rs b/polkadot/node/core/runtime-api/src/tests.rs
index bb7c2968961..fb97139a802 100644
--- a/polkadot/node/core/runtime-api/src/tests.rs
+++ b/polkadot/node/core/runtime-api/src/tests.rs
@@ -20,9 +20,9 @@ use polkadot_node_primitives::{BabeAllowedSlots, BabeEpoch, BabeEpochConfigurati
 use polkadot_node_subsystem::SpawnGlue;
 use polkadot_node_subsystem_test_helpers::make_subsystem_context;
 use polkadot_primitives::{
-	vstaging, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent,
-	CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams,
-	GroupRotationInfo, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage,
+	async_backing, slashing, AuthorityDiscoveryId, BlockNumber, CandidateCommitments,
+	CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState,
+	ExecutorParams, GroupRotationInfo, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage,
 	OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes,
 	SessionIndex, SessionInfo, Slot, ValidationCode, ValidationCodeHash, ValidatorId,
 	ValidatorIndex, ValidatorSignature,
@@ -213,7 +213,7 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient {
 	async fn unapplied_slashes(
 		&self,
 		_: Hash,
-	) -> Result<Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>, ApiError> {
+	) -> Result<Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>, ApiError> {
 		todo!("Not required for tests")
 	}
 
@@ -221,15 +221,15 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient {
 		&self,
 		_: Hash,
 		_: ValidatorId,
-	) -> Result<Option<vstaging::slashing::OpaqueKeyOwnershipProof>, ApiError> {
+	) -> Result<Option<slashing::OpaqueKeyOwnershipProof>, ApiError> {
 		todo!("Not required for tests")
 	}
 
 	async fn submit_report_dispute_lost(
 		&self,
 		_: Hash,
-		_: vstaging::slashing::DisputeProof,
-		_: vstaging::slashing::OpaqueKeyOwnershipProof,
+		_: slashing::DisputeProof,
+		_: slashing::OpaqueKeyOwnershipProof,
 	) -> Result<Option<()>, ApiError> {
 		todo!("Not required for tests")
 	}
@@ -250,18 +250,18 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient {
 		Ok(self.authorities.clone())
 	}
 
-	async fn staging_async_backing_params(
+	async fn async_backing_params(
 		&self,
 		_: Hash,
-	) -> Result<vstaging::AsyncBackingParams, ApiError> {
+	) -> Result<async_backing::AsyncBackingParams, ApiError> {
 		todo!("Not required for tests")
 	}
 
-	async fn staging_para_backing_state(
+	async fn para_backing_state(
 		&self,
 		_: Hash,
 		_: ParaId,
-	) -> Result<Option<vstaging::BackingState>, ApiError> {
+	) -> Result<Option<async_backing::BackingState>, ApiError> {
 		todo!("Not required for tests")
 	}
 
diff --git a/polkadot/node/network/approval-distribution/src/lib.rs b/polkadot/node/network/approval-distribution/src/lib.rs
index 70c20437d12..f76826d7fdf 100644
--- a/polkadot/node/network/approval-distribution/src/lib.rs
+++ b/polkadot/node/network/approval-distribution/src/lib.rs
@@ -26,8 +26,8 @@ use polkadot_node_network_protocol::{
 	self as net_protocol,
 	grid_topology::{RandomRouting, RequiredRouting, SessionGridTopologies, SessionGridTopology},
 	peer_set::{ValidationVersion, MAX_NOTIFICATION_SIZE},
-	v1 as protocol_v1, vstaging as protocol_vstaging, PeerId, UnifiedReputationChange as Rep,
-	Versioned, VersionedValidationProtocol, View,
+	v1 as protocol_v1, v2 as protocol_v2, PeerId, UnifiedReputationChange as Rep, Versioned,
+	VersionedValidationProtocol, View,
 };
 use polkadot_node_primitives::approval::{
 	AssignmentCert, BlockApprovalMeta, IndirectAssignmentCert, IndirectSignedApprovalVote,
@@ -602,9 +602,7 @@ impl State {
 	{
 		match msg {
 			Versioned::V1(protocol_v1::ApprovalDistributionMessage::Assignments(assignments)) |
-			Versioned::VStaging(protocol_vstaging::ApprovalDistributionMessage::Assignments(
-				assignments,
-			)) => {
+			Versioned::V2(protocol_v2::ApprovalDistributionMessage::Assignments(assignments)) => {
 				gum::trace!(
 					target: LOG_TARGET,
 					peer_id = %peer_id,
@@ -644,9 +642,7 @@ impl State {
 				}
 			},
 			Versioned::V1(protocol_v1::ApprovalDistributionMessage::Approvals(approvals)) |
-			Versioned::VStaging(protocol_vstaging::ApprovalDistributionMessage::Approvals(
-				approvals,
-			)) => {
+			Versioned::V2(protocol_v2::ApprovalDistributionMessage::Approvals(approvals)) => {
 				gum::trace!(
 					target: LOG_TARGET,
 					peer_id = %peer_id,
@@ -1060,7 +1056,7 @@ impl State {
 			route_random
 		};
 
-		let (v1_peers, vstaging_peers) = {
+		let (v1_peers, v2_peers) = {
 			let peer_data = &self.peer_data;
 			let peers = entry
 				.known_by
@@ -1090,9 +1086,9 @@ impl State {
 			}
 
 			let v1_peers = filter_peers_by_version(&peers, ValidationVersion::V1);
-			let vstaging_peers = filter_peers_by_version(&peers, ValidationVersion::VStaging);
+			let v2_peers = filter_peers_by_version(&peers, ValidationVersion::V2);
 
-			(v1_peers, vstaging_peers)
+			(v1_peers, v2_peers)
 		};
 
 		if !v1_peers.is_empty() {
@@ -1103,10 +1099,10 @@ impl State {
 			.await;
 		}
 
-		if !vstaging_peers.is_empty() {
+		if !v2_peers.is_empty() {
 			ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(
-				vstaging_peers,
-				versioned_assignments_packet(ValidationVersion::VStaging, assignments.clone()),
+				v2_peers,
+				versioned_assignments_packet(ValidationVersion::V2, assignments.clone()),
 			))
 			.await;
 		}
@@ -1395,7 +1391,7 @@ impl State {
 			in_topology || knowledge.sent.contains(message_subject, MessageKind::Assignment)
 		};
 
-		let (v1_peers, vstaging_peers) = {
+		let (v1_peers, v2_peers) = {
 			let peer_data = &self.peer_data;
 			let peers = entry
 				.known_by
@@ -1425,9 +1421,9 @@ impl State {
 			}
 
 			let v1_peers = filter_peers_by_version(&peers, ValidationVersion::V1);
-			let vstaging_peers = filter_peers_by_version(&peers, ValidationVersion::VStaging);
+			let v2_peers = filter_peers_by_version(&peers, ValidationVersion::V2);
 
-			(v1_peers, vstaging_peers)
+			(v1_peers, v2_peers)
 		};
 
 		let approvals = vec![vote];
@@ -1440,10 +1436,10 @@ impl State {
 			.await;
 		}
 
-		if !vstaging_peers.is_empty() {
+		if !v2_peers.is_empty() {
 			ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(
-				vstaging_peers,
-				versioned_approvals_packet(ValidationVersion::VStaging, approvals),
+				v2_peers,
+				versioned_approvals_packet(ValidationVersion::V2, approvals),
 			))
 			.await;
 		}
@@ -2017,9 +2013,9 @@ fn versioned_approvals_packet(
 			Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution(
 				protocol_v1::ApprovalDistributionMessage::Approvals(approvals),
 			)),
-		ValidationVersion::VStaging =>
-			Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution(
-				protocol_vstaging::ApprovalDistributionMessage::Approvals(approvals),
+		ValidationVersion::V2 =>
+			Versioned::V2(protocol_v2::ValidationProtocol::ApprovalDistribution(
+				protocol_v2::ApprovalDistributionMessage::Approvals(approvals),
 			)),
 	}
 }
@@ -2033,9 +2029,9 @@ fn versioned_assignments_packet(
 			Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution(
 				protocol_v1::ApprovalDistributionMessage::Assignments(assignments),
 			)),
-		ValidationVersion::VStaging =>
-			Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution(
-				protocol_vstaging::ApprovalDistributionMessage::Assignments(assignments),
+		ValidationVersion::V2 =>
+			Versioned::V2(protocol_v2::ValidationProtocol::ApprovalDistribution(
+				protocol_v2::ApprovalDistributionMessage::Assignments(assignments),
 			)),
 	}
 }
diff --git a/polkadot/node/network/approval-distribution/src/tests.rs b/polkadot/node/network/approval-distribution/src/tests.rs
index 1e9ae7b6200..29c7d8aa45d 100644
--- a/polkadot/node/network/approval-distribution/src/tests.rs
+++ b/polkadot/node/network/approval-distribution/src/tests.rs
@@ -2388,9 +2388,9 @@ fn import_versioned_approval() {
 	let _ = test_harness(state, |mut virtual_overseer| async move {
 		let overseer = &mut virtual_overseer;
 		// All peers are aware of relay parent.
-		setup_peer_with_view(overseer, &peer_a, ValidationVersion::VStaging, view![hash]).await;
+		setup_peer_with_view(overseer, &peer_a, ValidationVersion::V2, view![hash]).await;
 		setup_peer_with_view(overseer, &peer_b, ValidationVersion::V1, view![hash]).await;
-		setup_peer_with_view(overseer, &peer_c, ValidationVersion::VStaging, view![hash]).await;
+		setup_peer_with_view(overseer, &peer_c, ValidationVersion::V2, view![hash]).await;
 
 		// new block `hash_a` with 1 candidates
 		let meta = BlockApprovalMeta {
@@ -2431,8 +2431,8 @@ fn import_versioned_approval() {
 			overseer_recv(overseer).await,
 			AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(
 				peers,
-				Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution(
-					protocol_vstaging::ApprovalDistributionMessage::Assignments(assignments)
+				Versioned::V2(protocol_v2::ValidationProtocol::ApprovalDistribution(
+					protocol_v2::ApprovalDistributionMessage::Assignments(assignments)
 				))
 			)) => {
 				assert_eq!(peers.len(), 2);
@@ -2450,8 +2450,8 @@ fn import_versioned_approval() {
 			validator: validator_index,
 			signature: dummy_signature(),
 		};
-		let msg = protocol_vstaging::ApprovalDistributionMessage::Approvals(vec![approval.clone()]);
-		send_message_from_peer(overseer, &peer_a, Versioned::VStaging(msg)).await;
+		let msg = protocol_v2::ApprovalDistributionMessage::Approvals(vec![approval.clone()]);
+		send_message_from_peer(overseer, &peer_a, Versioned::V2(msg)).await;
 
 		assert_matches!(
 			overseer_recv(overseer).await,
@@ -2483,8 +2483,8 @@ fn import_versioned_approval() {
 			overseer_recv(overseer).await,
 			AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(
 				peers,
-				Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution(
-					protocol_vstaging::ApprovalDistributionMessage::Approvals(approvals)
+				Versioned::V2(protocol_v2::ValidationProtocol::ApprovalDistribution(
+					protocol_v2::ApprovalDistributionMessage::Approvals(approvals)
 				))
 			)) => {
 				assert_eq!(peers, vec![peer_c]);
diff --git a/polkadot/node/network/bitfield-distribution/src/lib.rs b/polkadot/node/network/bitfield-distribution/src/lib.rs
index c85d874bc4d..68e381ab6be 100644
--- a/polkadot/node/network/bitfield-distribution/src/lib.rs
+++ b/polkadot/node/network/bitfield-distribution/src/lib.rs
@@ -31,8 +31,8 @@ use polkadot_node_network_protocol::{
 		GridNeighbors, RandomRouting, RequiredRouting, SessionBoundGridTopologyStorage,
 	},
 	peer_set::{ProtocolVersion, ValidationVersion},
-	v1 as protocol_v1, vstaging as protocol_vstaging, OurView, PeerId,
-	UnifiedReputationChange as Rep, Versioned, View,
+	v1 as protocol_v1, v2 as protocol_v2, OurView, PeerId, UnifiedReputationChange as Rep,
+	Versioned, View,
 };
 use polkadot_node_subsystem::{
 	jaeger, messages::*, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, PerLeafSpan,
@@ -96,8 +96,8 @@ impl BitfieldGossipMessage {
 					self.relay_parent,
 					self.signed_availability.into(),
 				)),
-			Some(ValidationVersion::VStaging) =>
-				Versioned::VStaging(protocol_vstaging::BitfieldDistributionMessage::Bitfield(
+			Some(ValidationVersion::V2) =>
+				Versioned::V2(protocol_v2::BitfieldDistributionMessage::Bitfield(
 					self.relay_parent,
 					self.signed_availability.into(),
 				)),
@@ -502,8 +502,7 @@ async fn relay_message<Context>(
 		};
 
 		let v1_interested_peers = filter_by_version(&interested_peers, ValidationVersion::V1);
-		let vstaging_interested_peers =
-			filter_by_version(&interested_peers, ValidationVersion::VStaging);
+		let v2_interested_peers = filter_by_version(&interested_peers, ValidationVersion::V2);
 
 		if !v1_interested_peers.is_empty() {
 			ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(
@@ -513,10 +512,10 @@ async fn relay_message<Context>(
 			.await;
 		}
 
-		if !vstaging_interested_peers.is_empty() {
+		if !v2_interested_peers.is_empty() {
 			ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(
-				vstaging_interested_peers,
-				message.into_validation_protocol(ValidationVersion::VStaging.into()),
+				v2_interested_peers,
+				message.into_validation_protocol(ValidationVersion::V2.into()),
 			))
 			.await
 		}
@@ -538,7 +537,7 @@ async fn process_incoming_peer_message<Context>(
 			relay_parent,
 			bitfield,
 		)) => (relay_parent, bitfield),
-		Versioned::VStaging(protocol_vstaging::BitfieldDistributionMessage::Bitfield(
+		Versioned::V2(protocol_v2::BitfieldDistributionMessage::Bitfield(
 			relay_parent,
 			bitfield,
 		)) => (relay_parent, bitfield),
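The `relay_message` change keeps the subsystem's fan-out pattern: split the interested peers by negotiated validation-protocol version and send each group only the encoding it understands. A standalone sketch of that pattern with toy types (a plain `HashMap` stands in for the real per-peer state):

use std::collections::HashMap;

type PeerId = u64;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ValidationVersion {
	V1,
	V2,
}

fn filter_by_version(
	peers: &HashMap<PeerId, ValidationVersion>,
	version: ValidationVersion,
) -> Vec<PeerId> {
	peers.iter().filter(|(_, v)| **v == version).map(|(p, _)| *p).collect()
}

fn relay_message(peers: &HashMap<PeerId, ValidationVersion>) {
	let v1_interested_peers = filter_by_version(peers, ValidationVersion::V1);
	let v2_interested_peers = filter_by_version(peers, ValidationVersion::V2);

	// Each group only ever receives the encoding its negotiated protocol understands.
	if !v1_interested_peers.is_empty() {
		println!("send v1-encoded bitfield to {:?}", v1_interested_peers);
	}
	if !v2_interested_peers.is_empty() {
		println!("send v2-encoded bitfield to {:?}", v2_interested_peers);
	}
}

fn main() {
	let peers = HashMap::from([(1, ValidationVersion::V1), (2, ValidationVersion::V2)]);
	relay_message(&peers);
}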
diff --git a/polkadot/node/network/bitfield-distribution/src/tests.rs b/polkadot/node/network/bitfield-distribution/src/tests.rs
index d6795247e78..ba2434ea47d 100644
--- a/polkadot/node/network/bitfield-distribution/src/tests.rs
+++ b/polkadot/node/network/bitfield-distribution/src/tests.rs
@@ -1111,9 +1111,9 @@ fn network_protocol_versioning() {
 	let peer_c = PeerId::random();
 
 	let peers = [
-		(peer_a, ValidationVersion::VStaging),
+		(peer_a, ValidationVersion::V2),
 		(peer_b, ValidationVersion::V1),
-		(peer_c, ValidationVersion::VStaging),
+		(peer_c, ValidationVersion::V2),
 	];
 
 	// validator 0 key pair
@@ -1173,7 +1173,7 @@ fn network_protocol_versioning() {
 			&Default::default(),
 			NetworkBridgeEvent::PeerMessage(
 				peer_a,
-				msg.clone().into_network_message(ValidationVersion::VStaging.into()),
+				msg.clone().into_network_message(ValidationVersion::V2.into()),
 			),
 			&mut rng,
 		));
@@ -1201,14 +1201,14 @@ fn network_protocol_versioning() {
 			}
 		);
 
-		// vstaging gossip
+		// v2 gossip
 		assert_matches!(
 			handle.recv().await,
 			AllMessages::NetworkBridgeTx(
 				NetworkBridgeTxMessage::SendValidationMessage(peers, send_msg),
 			) => {
 				assert_eq!(peers, vec![peer_c]);
-				assert_eq!(send_msg, msg.clone().into_validation_protocol(ValidationVersion::VStaging.into()));
+				assert_eq!(send_msg, msg.clone().into_validation_protocol(ValidationVersion::V2.into()));
 			}
 		);
 
diff --git a/polkadot/node/network/bridge/src/rx/mod.rs b/polkadot/node/network/bridge/src/rx/mod.rs
index 82c67061d9a..7e86b46a7e0 100644
--- a/polkadot/node/network/bridge/src/rx/mod.rs
+++ b/polkadot/node/network/bridge/src/rx/mod.rs
@@ -33,7 +33,7 @@ use polkadot_node_network_protocol::{
 		CollationVersion, PeerSet, PeerSetProtocolNames, PerPeerSet, ProtocolVersion,
 		ValidationVersion,
 	},
-	v1 as protocol_v1, vstaging as protocol_vstaging, ObservedRole, OurView, PeerId,
+	v1 as protocol_v1, v2 as protocol_v2, ObservedRole, OurView, PeerId,
 	UnifiedReputationChange as Rep, View,
 };
 
@@ -262,13 +262,13 @@ where
 								),
 								&metrics,
 							),
-							ValidationVersion::VStaging => send_message(
+							ValidationVersion::V2 => send_message(
 								&mut network_service,
 								vec![peer],
 								PeerSet::Validation,
 								version,
 								&peerset_protocol_names,
-								WireMessage::<protocol_vstaging::ValidationProtocol>::ViewUpdate(
+								WireMessage::<protocol_v2::ValidationProtocol>::ViewUpdate(
 									local_view,
 								),
 								&metrics,
@@ -304,13 +304,13 @@ where
 								),
 								&metrics,
 							),
-							CollationVersion::VStaging => send_message(
+							CollationVersion::V2 => send_message(
 								&mut network_service,
 								vec![peer],
 								PeerSet::Collation,
 								version,
 								&peerset_protocol_names,
-								WireMessage::<protocol_vstaging::CollationProtocol>::ViewUpdate(
+								WireMessage::<protocol_v2::CollationProtocol>::ViewUpdate(
 									local_view,
 								),
 								&metrics,
@@ -465,9 +465,9 @@ where
 							&metrics,
 						)
 					} else if expected_versions[PeerSet::Validation] ==
-						Some(ValidationVersion::VStaging.into())
+						Some(ValidationVersion::V2.into())
 					{
-						handle_peer_messages::<protocol_vstaging::ValidationProtocol, _>(
+						handle_peer_messages::<protocol_v2::ValidationProtocol, _>(
 							remote,
 							PeerSet::Validation,
 							&mut shared.0.lock().validation_peers,
@@ -507,9 +507,9 @@ where
 							&metrics,
 						)
 					} else if expected_versions[PeerSet::Collation] ==
-						Some(CollationVersion::VStaging.into())
+						Some(CollationVersion::V2.into())
 					{
-						handle_peer_messages::<protocol_vstaging::CollationProtocol, _>(
+						handle_peer_messages::<protocol_v2::CollationProtocol, _>(
 							remote,
 							PeerSet::Collation,
 							&mut shared.0.lock().collation_peers,
@@ -813,10 +813,8 @@ fn update_our_view<Net, Context>(
 	let v1_validation_peers = filter_by_version(&validation_peers, ValidationVersion::V1.into());
 	let v1_collation_peers = filter_by_version(&collation_peers, CollationVersion::V1.into());
 
-	let vstaging_validation_peers =
-		filter_by_version(&validation_peers, ValidationVersion::VStaging.into());
-	let vstaging_collation_peers =
-		filter_by_version(&collation_peers, ValidationVersion::VStaging.into());
+	let v2_validation_peers = filter_by_version(&validation_peers, ValidationVersion::V2.into());
+	let v2_collation_peers = filter_by_version(&collation_peers, ValidationVersion::V2.into());
 
 	send_validation_message_v1(
 		net,
@@ -834,17 +832,17 @@ fn update_our_view<Net, Context>(
 		metrics,
 	);
 
-	send_validation_message_vstaging(
+	send_validation_message_v2(
 		net,
-		vstaging_validation_peers,
+		v2_validation_peers,
 		peerset_protocol_names,
 		WireMessage::ViewUpdate(new_view.clone()),
 		metrics,
 	);
 
-	send_collation_message_vstaging(
+	send_collation_message_v2(
 		net,
-		vstaging_collation_peers,
+		v2_collation_peers,
 		peerset_protocol_names,
 		WireMessage::ViewUpdate(new_view),
 		metrics,
@@ -955,36 +953,36 @@ fn send_collation_message_v1(
 	);
 }
 
-fn send_validation_message_vstaging(
+fn send_validation_message_v2(
 	net: &mut impl Network,
 	peers: Vec<PeerId>,
 	protocol_names: &PeerSetProtocolNames,
-	message: WireMessage<protocol_vstaging::ValidationProtocol>,
+	message: WireMessage<protocol_v2::ValidationProtocol>,
 	metrics: &Metrics,
 ) {
 	send_message(
 		net,
 		peers,
 		PeerSet::Validation,
-		ValidationVersion::VStaging.into(),
+		ValidationVersion::V2.into(),
 		protocol_names,
 		message,
 		metrics,
 	);
 }
 
-fn send_collation_message_vstaging(
+fn send_collation_message_v2(
 	net: &mut impl Network,
 	peers: Vec<PeerId>,
 	protocol_names: &PeerSetProtocolNames,
-	message: WireMessage<protocol_vstaging::CollationProtocol>,
+	message: WireMessage<protocol_v2::CollationProtocol>,
 	metrics: &Metrics,
 ) {
 	send_message(
 		net,
 		peers,
 		PeerSet::Collation,
-		CollationVersion::VStaging.into(),
+		CollationVersion::V2.into(),
 		protocol_names,
 		message,
 		metrics,
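`send_validation_message_v2` and `send_collation_message_v2` are thin wrappers that fix the peer set and protocol version while delegating to a shared, generic `send_message`. A simplified sketch of that layering, with hypothetical types in place of the bridge's network service, peer-set and metrics machinery:

#[derive(Debug, Clone, Copy)]
enum PeerSet {
	Validation,
	Collation,
}

#[derive(Debug, Clone, Copy)]
enum ProtocolVersion {
	V1,
	V2,
}

// Generic sender: the real one encodes the message and hands it to the network
// service together with metrics; here only the dispatch shape is kept.
fn send_message<M: std::fmt::Debug>(
	peers: &[u64],
	peer_set: PeerSet,
	version: ProtocolVersion,
	message: M,
) {
	println!("{:?}/{:?} -> {:?}: {:?}", peer_set, version, peers, message);
}

// Thin per-version wrappers, mirroring send_validation_message_v1/_v2 above.
fn send_validation_message_v1(peers: &[u64], message: &str) {
	send_message(peers, PeerSet::Validation, ProtocolVersion::V1, message);
}

fn send_validation_message_v2(peers: &[u64], message: &str) {
	send_message(peers, PeerSet::Validation, ProtocolVersion::V2, message);
}

fn send_collation_message_v2(peers: &[u64], message: &str) {
	send_message(peers, PeerSet::Collation, ProtocolVersion::V2, message);
}

fn main() {
	send_validation_message_v1(&[1, 2], "view update");
	send_validation_message_v2(&[3], "view update");
	send_collation_message_v2(&[4], "view update");
}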
diff --git a/polkadot/node/network/bridge/src/rx/tests.rs b/polkadot/node/network/bridge/src/rx/tests.rs
index 127f46e0fa3..7c69cce4839 100644
--- a/polkadot/node/network/bridge/src/rx/tests.rs
+++ b/polkadot/node/network/bridge/src/rx/tests.rs
@@ -1216,10 +1216,10 @@ fn network_protocol_versioning_view_update() {
 
 		let peer_ids: Vec<_> = (0..4).map(|_| PeerId::random()).collect();
 		let peers = [
-			(peer_ids[0], PeerSet::Validation, ValidationVersion::VStaging),
+			(peer_ids[0], PeerSet::Validation, ValidationVersion::V2),
 			(peer_ids[1], PeerSet::Collation, ValidationVersion::V1),
 			(peer_ids[2], PeerSet::Validation, ValidationVersion::V1),
-			(peer_ids[3], PeerSet::Collation, ValidationVersion::VStaging),
+			(peer_ids[3], PeerSet::Collation, ValidationVersion::V2),
 		];
 
 		let head = Hash::repeat_byte(1);
@@ -1245,8 +1245,8 @@ fn network_protocol_versioning_view_update() {
 				ValidationVersion::V1 =>
 					WireMessage::<protocol_v1::ValidationProtocol>::ViewUpdate(view.clone())
 						.encode(),
-				ValidationVersion::VStaging =>
-					WireMessage::<protocol_vstaging::ValidationProtocol>::ViewUpdate(view.clone())
+				ValidationVersion::V2 =>
+					WireMessage::<protocol_v2::ValidationProtocol>::ViewUpdate(view.clone())
 						.encode(),
 			};
 			assert_network_actions_contains(
@@ -1268,12 +1268,7 @@ fn network_protocol_versioning_subsystem_msg() {
 		let peer = PeerId::random();
 
 		network_handle
-			.connect_peer(
-				peer,
-				ValidationVersion::VStaging,
-				PeerSet::Validation,
-				ObservedRole::Full,
-			)
+			.connect_peer(peer, ValidationVersion::V2, PeerSet::Validation, ObservedRole::Full)
 			.await;
 
 		// bridge will inform about all connected peers.
@@ -1282,7 +1277,7 @@ fn network_protocol_versioning_subsystem_msg() {
 				NetworkBridgeEvent::PeerConnected(
 					peer,
 					ObservedRole::Full,
-					ValidationVersion::VStaging.into(),
+					ValidationVersion::V2.into(),
 					None,
 				),
 				&mut virtual_overseer,
@@ -1297,9 +1292,9 @@ fn network_protocol_versioning_subsystem_msg() {
 		}
 
 		let approval_distribution_message =
-			protocol_vstaging::ApprovalDistributionMessage::Approvals(Vec::new());
+			protocol_v2::ApprovalDistributionMessage::Approvals(Vec::new());
 
-		let msg = protocol_vstaging::ValidationProtocol::ApprovalDistribution(
+		let msg = protocol_v2::ValidationProtocol::ApprovalDistribution(
 			approval_distribution_message.clone(),
 		);
 
@@ -1315,7 +1310,7 @@ fn network_protocol_versioning_subsystem_msg() {
 			virtual_overseer.recv().await,
 			AllMessages::ApprovalDistribution(
 				ApprovalDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerMessage(p, Versioned::VStaging(m))
+					NetworkBridgeEvent::PeerMessage(p, Versioned::V2(m))
 				)
 			) => {
 				assert_eq!(p, peer);
@@ -1330,10 +1325,10 @@ fn network_protocol_versioning_subsystem_msg() {
 			signature: sp_core::crypto::UncheckedFrom::unchecked_from([1u8; 64]),
 		};
 		let statement_distribution_message =
-			protocol_vstaging::StatementDistributionMessage::V1Compatibility(
+			protocol_v2::StatementDistributionMessage::V1Compatibility(
 				protocol_v1::StatementDistributionMessage::LargeStatement(metadata),
 			);
-		let msg = protocol_vstaging::ValidationProtocol::StatementDistribution(
+		let msg = protocol_v2::ValidationProtocol::StatementDistribution(
 			statement_distribution_message.clone(),
 		);
 
@@ -1349,7 +1344,7 @@ fn network_protocol_versioning_subsystem_msg() {
 			virtual_overseer.recv().await,
 			AllMessages::StatementDistribution(
 				StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerMessage(p, Versioned::VStaging(m))
+					NetworkBridgeEvent::PeerMessage(p, Versioned::V2(m))
 				)
 			) => {
 				assert_eq!(p, peer);
diff --git a/polkadot/node/network/bridge/src/tx/mod.rs b/polkadot/node/network/bridge/src/tx/mod.rs
index 7fa1149593c..f15635f1f41 100644
--- a/polkadot/node/network/bridge/src/tx/mod.rs
+++ b/polkadot/node/network/bridge/src/tx/mod.rs
@@ -20,7 +20,7 @@ use super::*;
 use polkadot_node_network_protocol::{
 	peer_set::{CollationVersion, PeerSet, PeerSetProtocolNames, ValidationVersion},
 	request_response::ReqProtocolNames,
-	v1 as protocol_v1, vstaging as protocol_vstaging, PeerId, Versioned,
+	v1 as protocol_v1, v2 as protocol_v2, PeerId, Versioned,
 };
 
 use polkadot_node_subsystem::{
@@ -198,7 +198,7 @@ where
 					WireMessage::ProtocolMessage(msg),
 					&metrics,
 				),
-				Versioned::VStaging(msg) => send_validation_message_vstaging(
+				Versioned::V2(msg) => send_validation_message_v2(
 					&mut network_service,
 					peers,
 					peerset_protocol_names,
@@ -223,7 +223,7 @@ where
 						WireMessage::ProtocolMessage(msg),
 						&metrics,
 					),
-					Versioned::VStaging(msg) => send_validation_message_vstaging(
+					Versioned::V2(msg) => send_validation_message_v2(
 						&mut network_service,
 						peers,
 						peerset_protocol_names,
@@ -248,7 +248,7 @@ where
 					WireMessage::ProtocolMessage(msg),
 					&metrics,
 				),
-				Versioned::VStaging(msg) => send_collation_message_vstaging(
+				Versioned::V2(msg) => send_collation_message_v2(
 					&mut network_service,
 					peers,
 					peerset_protocol_names,
@@ -273,7 +273,7 @@ where
 						WireMessage::ProtocolMessage(msg),
 						&metrics,
 					),
-					Versioned::VStaging(msg) => send_collation_message_vstaging(
+					Versioned::V2(msg) => send_collation_message_v2(
 						&mut network_service,
 						peers,
 						peerset_protocol_names,
@@ -296,13 +296,11 @@ where
 					Requests::AvailableDataFetchingV1(_) =>
 						metrics.on_message("available_data_fetching_v1"),
 					Requests::CollationFetchingV1(_) => metrics.on_message("collation_fetching_v1"),
-					Requests::CollationFetchingVStaging(_) =>
-						metrics.on_message("collation_fetching_vstaging"),
+					Requests::CollationFetchingV2(_) => metrics.on_message("collation_fetching_v2"),
 					Requests::PoVFetchingV1(_) => metrics.on_message("pov_fetching_v1"),
 					Requests::DisputeSendingV1(_) => metrics.on_message("dispute_sending_v1"),
 					Requests::StatementFetchingV1(_) => metrics.on_message("statement_fetching_v1"),
-					Requests::AttestedCandidateVStaging(_) =>
-						metrics.on_message("attested_candidate_vstaging"),
+					Requests::AttestedCandidateV2(_) => metrics.on_message("attested_candidate_v2"),
 				}
 
 				network_service
@@ -425,36 +423,36 @@ fn send_collation_message_v1(
 	);
 }
 
-fn send_validation_message_vstaging(
+fn send_validation_message_v2(
 	net: &mut impl Network,
 	peers: Vec<PeerId>,
 	protocol_names: &PeerSetProtocolNames,
-	message: WireMessage<protocol_vstaging::ValidationProtocol>,
+	message: WireMessage<protocol_v2::ValidationProtocol>,
 	metrics: &Metrics,
 ) {
 	send_message(
 		net,
 		peers,
 		PeerSet::Validation,
-		ValidationVersion::VStaging.into(),
+		ValidationVersion::V2.into(),
 		protocol_names,
 		message,
 		metrics,
 	);
 }
 
-fn send_collation_message_vstaging(
+fn send_collation_message_v2(
 	net: &mut impl Network,
 	peers: Vec<PeerId>,
 	protocol_names: &PeerSetProtocolNames,
-	message: WireMessage<protocol_vstaging::CollationProtocol>,
+	message: WireMessage<protocol_v2::CollationProtocol>,
 	metrics: &Metrics,
 ) {
 	send_message(
 		net,
 		peers,
 		PeerSet::Collation,
-		CollationVersion::VStaging.into(),
+		CollationVersion::V2.into(),
 		protocol_names,
 		message,
 		metrics,
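On the transmit side, an already-versioned message is routed to the matching per-version sender by matching on `Versioned`. A toy sketch of that dispatch, with string payloads standing in for the real wire messages:

enum Versioned<V1, V2> {
	V1(V1),
	V2(V2),
}

fn send_validation_message_v1(peers: &[u64], msg: &str) {
	println!("v1 encoding of {:?} for peers {:?}", msg, peers);
}

fn send_validation_message_v2(peers: &[u64], msg: &str) {
	println!("v2 encoding of {:?} for peers {:?}", msg, peers);
}

// Route an already-versioned message to the helper that speaks that version.
fn send_validation_message(peers: &[u64], msg: Versioned<&str, &str>) {
	match msg {
		Versioned::V1(m) => send_validation_message_v1(peers, m),
		Versioned::V2(m) => send_validation_message_v2(peers, m),
	}
}

fn main() {
	send_validation_message(&[7, 8], Versioned::V2("approvals"));
	send_validation_message(&[9], Versioned::V1("approvals"));
}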
diff --git a/polkadot/node/network/bridge/src/tx/tests.rs b/polkadot/node/network/bridge/src/tx/tests.rs
index 21cd134c54f..48287f8b74c 100644
--- a/polkadot/node/network/bridge/src/tx/tests.rs
+++ b/polkadot/node/network/bridge/src/tx/tests.rs
@@ -341,10 +341,10 @@ fn network_protocol_versioning_send() {
 
 		let peer_ids: Vec<_> = (0..4).map(|_| PeerId::random()).collect();
 		let peers = [
-			(peer_ids[0], PeerSet::Validation, ValidationVersion::VStaging),
+			(peer_ids[0], PeerSet::Validation, ValidationVersion::V2),
 			(peer_ids[1], PeerSet::Collation, ValidationVersion::V1),
 			(peer_ids[2], PeerSet::Validation, ValidationVersion::V1),
-			(peer_ids[3], PeerSet::Collation, ValidationVersion::VStaging),
+			(peer_ids[3], PeerSet::Collation, ValidationVersion::V2),
 		];
 
 		for &(peer_id, peer_set, version) in &peers {
@@ -359,9 +359,9 @@ fn network_protocol_versioning_send() {
 
 		{
 			let approval_distribution_message =
-				protocol_vstaging::ApprovalDistributionMessage::Approvals(Vec::new());
+				protocol_v2::ApprovalDistributionMessage::Approvals(Vec::new());
 
-			let msg = protocol_vstaging::ValidationProtocol::ApprovalDistribution(
+			let msg = protocol_v2::ValidationProtocol::ApprovalDistribution(
 				approval_distribution_message.clone(),
 			);
 
@@ -372,7 +372,7 @@ fn network_protocol_versioning_send() {
 				.send(FromOrchestra::Communication {
 					msg: NetworkBridgeTxMessage::SendValidationMessage(
 						receivers.clone(),
-						Versioned::VStaging(msg.clone()),
+						Versioned::V2(msg.clone()),
 					),
 				})
 				.timeout(TIMEOUT)
@@ -398,15 +398,14 @@ fn network_protocol_versioning_send() {
 		// send a collation protocol message.
 
 		{
-			let collator_protocol_message = protocol_vstaging::CollatorProtocolMessage::Declare(
+			let collator_protocol_message = protocol_v2::CollatorProtocolMessage::Declare(
 				Sr25519Keyring::Alice.public().into(),
 				0_u32.into(),
 				dummy_collator_signature(),
 			);
 
-			let msg = protocol_vstaging::CollationProtocol::CollatorProtocol(
-				collator_protocol_message.clone(),
-			);
+			let msg =
+				protocol_v2::CollationProtocol::CollatorProtocol(collator_protocol_message.clone());
 
 			let receivers = vec![peer_ids[1], peer_ids[2]];
 
@@ -414,7 +413,7 @@ fn network_protocol_versioning_send() {
 				.send(FromOrchestra::Communication {
 					msg: NetworkBridgeTxMessage::SendCollationMessages(vec![(
 						receivers.clone(),
-						Versioned::VStaging(msg.clone()),
+						Versioned::V2(msg.clone()),
 					)]),
 				})
 				.await;
diff --git a/polkadot/node/network/collator-protocol/src/collator_side/collation.rs b/polkadot/node/network/collator-protocol/src/collator_side/collation.rs
index 627c38b776f..53f947142d1 100644
--- a/polkadot/node/network/collator-protocol/src/collator_side/collation.rs
+++ b/polkadot/node/network/collator-protocol/src/collator_side/collation.rs
@@ -22,8 +22,7 @@ use futures::{future::BoxFuture, stream::FuturesUnordered};
 
 use polkadot_node_network_protocol::{
 	request_response::{
-		incoming::OutgoingResponse, v1 as protocol_v1, vstaging as protocol_vstaging,
-		IncomingRequest,
+		incoming::OutgoingResponse, v1 as protocol_v1, v2 as protocol_v2, IncomingRequest,
 	},
 	PeerId,
 };
@@ -89,7 +88,7 @@ pub struct WaitingCollationFetches {
 /// Backwards-compatible wrapper for incoming collations requests.
 pub enum VersionedCollationRequest {
 	V1(IncomingRequest<protocol_v1::CollationFetchingRequest>),
-	VStaging(IncomingRequest<protocol_vstaging::CollationFetchingRequest>),
+	V2(IncomingRequest<protocol_v2::CollationFetchingRequest>),
 }
 
 impl From<IncomingRequest<protocol_v1::CollationFetchingRequest>> for VersionedCollationRequest {
@@ -98,11 +97,9 @@ impl From<IncomingRequest<protocol_v1::CollationFetchingRequest>> for VersionedC
 	}
 }
 
-impl From<IncomingRequest<protocol_vstaging::CollationFetchingRequest>>
-	for VersionedCollationRequest
-{
-	fn from(req: IncomingRequest<protocol_vstaging::CollationFetchingRequest>) -> Self {
-		Self::VStaging(req)
+impl From<IncomingRequest<protocol_v2::CollationFetchingRequest>> for VersionedCollationRequest {
+	fn from(req: IncomingRequest<protocol_v2::CollationFetchingRequest>) -> Self {
+		Self::V2(req)
 	}
 }
 
@@ -111,7 +108,7 @@ impl VersionedCollationRequest {
 	pub fn para_id(&self) -> ParaId {
 		match self {
 			VersionedCollationRequest::V1(req) => req.payload.para_id,
-			VersionedCollationRequest::VStaging(req) => req.payload.para_id,
+			VersionedCollationRequest::V2(req) => req.payload.para_id,
 		}
 	}
 
@@ -119,7 +116,7 @@ impl VersionedCollationRequest {
 	pub fn relay_parent(&self) -> Hash {
 		match self {
 			VersionedCollationRequest::V1(req) => req.payload.relay_parent,
-			VersionedCollationRequest::VStaging(req) => req.payload.relay_parent,
+			VersionedCollationRequest::V2(req) => req.payload.relay_parent,
 		}
 	}
 
@@ -127,7 +124,7 @@ impl VersionedCollationRequest {
 	pub fn peer_id(&self) -> PeerId {
 		match self {
 			VersionedCollationRequest::V1(req) => req.peer,
-			VersionedCollationRequest::VStaging(req) => req.peer,
+			VersionedCollationRequest::V2(req) => req.peer,
 		}
 	}
 
@@ -138,7 +135,7 @@ impl VersionedCollationRequest {
 	) -> Result<(), ()> {
 		match self {
 			VersionedCollationRequest::V1(req) => req.send_outgoing_response(response),
-			VersionedCollationRequest::VStaging(req) => req.send_outgoing_response(response),
+			VersionedCollationRequest::V2(req) => req.send_outgoing_response(response),
 		}
 	}
 }
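`VersionedCollationRequest` hides the per-version incoming request types behind a single enum, with `From` impls for ergonomic conversion and accessors for the fields both versions share. A compact, self-contained sketch of the same shape, using simplified stand-ins for the request and payload types:

struct IncomingRequest<Payload> {
	peer: u64,
	payload: Payload,
}

struct CollationFetchingRequestV1 {
	para_id: u32,
}

struct CollationFetchingRequestV2 {
	para_id: u32,
	candidate_hash: u64,
}

enum VersionedCollationRequest {
	V1(IncomingRequest<CollationFetchingRequestV1>),
	V2(IncomingRequest<CollationFetchingRequestV2>),
}

impl From<IncomingRequest<CollationFetchingRequestV1>> for VersionedCollationRequest {
	fn from(req: IncomingRequest<CollationFetchingRequestV1>) -> Self {
		Self::V1(req)
	}
}

impl From<IncomingRequest<CollationFetchingRequestV2>> for VersionedCollationRequest {
	fn from(req: IncomingRequest<CollationFetchingRequestV2>) -> Self {
		Self::V2(req)
	}
}

impl VersionedCollationRequest {
	// Fields shared by both request versions are exposed through common accessors.
	fn para_id(&self) -> u32 {
		match self {
			Self::V1(req) => req.payload.para_id,
			Self::V2(req) => req.payload.para_id,
		}
	}

	fn peer_id(&self) -> u64 {
		match self {
			Self::V1(req) => req.peer,
			Self::V2(req) => req.peer,
		}
	}
}

fn main() {
	let v1_req: VersionedCollationRequest =
		IncomingRequest { peer: 3, payload: CollationFetchingRequestV1 { para_id: 2000 } }.into();
	let v2_req: VersionedCollationRequest = IncomingRequest {
		peer: 7,
		payload: CollationFetchingRequestV2 { para_id: 2000, candidate_hash: 42 },
	}
	.into();
	println!("peer {} asked for para {}", v1_req.peer_id(), v1_req.para_id());
	println!("peer {} asked for para {}", v2_req.peer_id(), v2_req.para_id());
}

Version-specific fields such as the v2-only candidate hash stay reachable by matching on the `V2` variant, which is how the handlers above look collations up by candidate hash.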
diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
index ad2ab99568c..304cabbaac8 100644
--- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
@@ -31,10 +31,10 @@ use polkadot_node_network_protocol::{
 	peer_set::{CollationVersion, PeerSet},
 	request_response::{
 		incoming::{self, OutgoingResponse},
-		v1 as request_v1, vstaging as request_vstaging, IncomingRequestReceiver,
+		v1 as request_v1, v2 as request_v2, IncomingRequestReceiver,
 	},
-	v1 as protocol_v1, vstaging as protocol_vstaging, OurView, PeerId,
-	UnifiedReputationChange as Rep, Versioned, View,
+	v1 as protocol_v1, v2 as protocol_v2, OurView, PeerId, UnifiedReputationChange as Rep,
+	Versioned, View,
 };
 use polkadot_node_primitives::{CollationSecondedSignal, PoV, Statement};
 use polkadot_node_subsystem::{
@@ -577,7 +577,7 @@ async fn determine_our_validators<Context>(
 fn declare_message(
 	state: &mut State,
 	version: CollationVersion,
-) -> Option<Versioned<protocol_v1::CollationProtocol, protocol_vstaging::CollationProtocol>> {
+) -> Option<Versioned<protocol_v1::CollationProtocol, protocol_v2::CollationProtocol>> {
 	let para_id = state.collating_on?;
 	Some(match version {
 		CollationVersion::V1 => {
@@ -590,17 +590,15 @@ fn declare_message(
 			);
 			Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol(wire_message))
 		},
-		CollationVersion::VStaging => {
+		CollationVersion::V2 => {
 			let declare_signature_payload =
-				protocol_vstaging::declare_signature_payload(&state.local_peer_id);
-			let wire_message = protocol_vstaging::CollatorProtocolMessage::Declare(
+				protocol_v2::declare_signature_payload(&state.local_peer_id);
+			let wire_message = protocol_v2::CollatorProtocolMessage::Declare(
 				state.collator_pair.public(),
 				para_id,
 				state.collator_pair.sign(&declare_signature_payload),
 			);
-			Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol(
-				wire_message,
-			))
+			Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol(wire_message))
 		},
 	})
 }
@@ -706,15 +704,13 @@ async fn advertise_collation<Context>(
 		collation.status.advance_to_advertised();
 
 		let collation_message = match protocol_version {
-			CollationVersion::VStaging => {
-				let wire_message = protocol_vstaging::CollatorProtocolMessage::AdvertiseCollation {
+			CollationVersion::V2 => {
+				let wire_message = protocol_v2::CollatorProtocolMessage::AdvertiseCollation {
 					relay_parent,
 					candidate_hash: *candidate_hash,
 					parent_head_data_hash: collation.parent_head_data_hash,
 				};
-				Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol(
-					wire_message,
-				))
+				Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol(wire_message))
 			},
 			CollationVersion::V1 => {
 				let wire_message =
@@ -837,7 +833,7 @@ async fn send_collation(
 	let candidate_hash = receipt.hash();
 
 	// The response payload is the same for both versions of protocol
-	// and doesn't have vstaging alias for simplicity.
+	// and doesn't have a v2 alias, for simplicity.
 	let response = OutgoingResponse {
 		result: Ok(request_v1::CollationFetchingResponse::Collation(receipt, pov)),
 		reputation_changes: Vec::new(),
@@ -868,16 +864,13 @@ async fn handle_incoming_peer_message<Context>(
 	runtime: &mut RuntimeInfo,
 	state: &mut State,
 	origin: PeerId,
-	msg: Versioned<
-		protocol_v1::CollatorProtocolMessage,
-		protocol_vstaging::CollatorProtocolMessage,
-	>,
+	msg: Versioned<protocol_v1::CollatorProtocolMessage, protocol_v2::CollatorProtocolMessage>,
 ) -> Result<()> {
 	use protocol_v1::CollatorProtocolMessage as V1;
-	use protocol_vstaging::CollatorProtocolMessage as VStaging;
+	use protocol_v2::CollatorProtocolMessage as V2;
 
 	match msg {
-		Versioned::V1(V1::Declare(..)) | Versioned::VStaging(VStaging::Declare(..)) => {
+		Versioned::V1(V1::Declare(..)) | Versioned::V2(V2::Declare(..)) => {
 			gum::trace!(
 				target: LOG_TARGET,
 				?origin,
@@ -888,8 +881,7 @@ async fn handle_incoming_peer_message<Context>(
 			ctx.send_message(NetworkBridgeTxMessage::DisconnectPeer(origin, PeerSet::Collation))
 				.await;
 		},
-		Versioned::V1(V1::AdvertiseCollation(_)) |
-		Versioned::VStaging(VStaging::AdvertiseCollation { .. }) => {
+		Versioned::V1(V1::AdvertiseCollation(_)) | Versioned::V2(V2::AdvertiseCollation { .. }) => {
 			gum::trace!(
 				target: LOG_TARGET,
 				?origin,
@@ -904,7 +896,7 @@ async fn handle_incoming_peer_message<Context>(
 				.await;
 		},
 		Versioned::V1(V1::CollationSeconded(relay_parent, statement)) |
-		Versioned::VStaging(VStaging::CollationSeconded(relay_parent, statement)) => {
+		Versioned::V2(V2::CollationSeconded(relay_parent, statement)) => {
 			if !matches!(statement.unchecked_payload(), Statement::Seconded(_)) {
 				gum::warn!(
 					target: LOG_TARGET,
@@ -1006,7 +998,7 @@ async fn handle_incoming_request<Context>(
 			let collation = match &req {
 				VersionedCollationRequest::V1(_) if !mode.is_enabled() =>
 					per_relay_parent.collations.values_mut().next(),
-				VersionedCollationRequest::VStaging(req) =>
+				VersionedCollationRequest::V2(req) =>
 					per_relay_parent.collations.get_mut(&req.payload.candidate_hash),
 				_ => {
 					gum::warn!(
@@ -1322,7 +1314,7 @@ pub(crate) async fn run<Context>(
 	local_peer_id: PeerId,
 	collator_pair: CollatorPair,
 	req_v1_receiver: IncomingRequestReceiver<request_v1::CollationFetchingRequest>,
-	req_v2_receiver: IncomingRequestReceiver<request_vstaging::CollationFetchingRequest>,
+	req_v2_receiver: IncomingRequestReceiver<request_v2::CollationFetchingRequest>,
 	metrics: Metrics,
 ) -> std::result::Result<(), FatalError> {
 	run_inner(
@@ -1344,7 +1336,7 @@ async fn run_inner<Context>(
 	local_peer_id: PeerId,
 	collator_pair: CollatorPair,
 	mut req_v1_receiver: IncomingRequestReceiver<request_v1::CollationFetchingRequest>,
-	mut req_v2_receiver: IncomingRequestReceiver<request_vstaging::CollationFetchingRequest>,
+	mut req_v2_receiver: IncomingRequestReceiver<request_v2::CollationFetchingRequest>,
 	metrics: Metrics,
 	reputation: ReputationAggregator,
 	reputation_interval: Duration,
@@ -1425,7 +1417,7 @@ async fn run_inner<Context>(
 						(ProspectiveParachainsMode::Disabled, VersionedCollationRequest::V1(_)) => {
 							per_relay_parent.collations.values().next()
 						},
-						(ProspectiveParachainsMode::Enabled { .. }, VersionedCollationRequest::VStaging(req)) => {
+						(ProspectiveParachainsMode::Enabled { .. }, VersionedCollationRequest::V2(req)) => {
 							per_relay_parent.collations.get(&req.payload.candidate_hash)
 						},
 						_ => {
@@ -1476,7 +1468,7 @@ async fn run_inner<Context>(
 
 				log_error(
 					handle_incoming_request(&mut ctx, &mut state, request).await,
-					"Handling incoming collation fetch request VStaging"
+					"Handling incoming collation fetch request V2"
 				)?;
 			}
 		}
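`handle_incoming_peer_message` treats the V1 and V2 flavours of the same logical message in a single arm via or-patterns, and only keeps separate arms where the payloads differ (the v2 `AdvertiseCollation`, for instance, carries a candidate hash). A self-contained sketch with toy message enums:

enum V1Msg {
	Declare(u32),
	AdvertiseCollation(u64),
}

enum V2Msg {
	Declare(u32),
	AdvertiseCollation { relay_parent: u64, candidate_hash: u64 },
}

enum Versioned {
	V1(V1Msg),
	V2(V2Msg),
}

fn handle_incoming_peer_message(msg: Versioned) {
	match msg {
		// Both protocol versions carry `Declare` and the handling is identical,
		// so one arm covers both via an or-pattern.
		Versioned::V1(V1Msg::Declare(para)) | Versioned::V2(V2Msg::Declare(para)) =>
			println!("peer declared collation intent for para {}", para),
		// Advertisements differ in shape between versions, so the arms stay separate.
		Versioned::V1(V1Msg::AdvertiseCollation(relay_parent)) =>
			println!("v1 advertisement at relay parent {}", relay_parent),
		Versioned::V2(V2Msg::AdvertiseCollation { relay_parent, candidate_hash }) =>
			println!("v2 advertisement at {} for candidate {}", relay_parent, candidate_hash),
	}
}

fn main() {
	handle_incoming_peer_message(Versioned::V2(V2Msg::Declare(2000)));
	handle_incoming_peer_message(Versioned::V1(V1Msg::AdvertiseCollation(1)));
	handle_incoming_peer_message(Versioned::V2(V2Msg::AdvertiseCollation {
		relay_parent: 1,
		candidate_hash: 9,
	}));
}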
diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs
index b452c84c2cd..7dd2287dab6 100644
--- a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs
@@ -198,7 +198,7 @@ impl TestState {
 			overseer_recv(virtual_overseer).await,
 			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
 				relay_parent,
-				RuntimeApiRequest::StagingAsyncBackingParams(tx)
+				RuntimeApiRequest::AsyncBackingParams(tx)
 			)) => {
 				assert_eq!(relay_parent, self.relay_parent);
 				tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap();
@@ -212,7 +212,7 @@ type VirtualOverseer = test_helpers::TestSubsystemContextHandle<CollatorProtocol
 struct TestHarness {
 	virtual_overseer: VirtualOverseer,
 	req_v1_cfg: sc_network::config::RequestResponseConfig,
-	req_vstaging_cfg: sc_network::config::RequestResponseConfig,
+	req_v2_cfg: sc_network::config::RequestResponseConfig,
 }
 
 fn test_harness<T: Future<Output = TestHarness>>(
@@ -236,7 +236,7 @@ fn test_harness<T: Future<Output = TestHarness>>(
 
 	let (collation_req_receiver, req_v1_cfg) =
 		IncomingRequest::get_config_receiver(&req_protocol_names);
-	let (collation_req_vstaging_receiver, req_vstaging_cfg) =
+	let (collation_req_v2_receiver, req_v2_cfg) =
 		IncomingRequest::get_config_receiver(&req_protocol_names);
 	let subsystem = async {
 		run_inner(
@@ -244,7 +244,7 @@ fn test_harness<T: Future<Output = TestHarness>>(
 			local_peer_id,
 			collator_pair,
 			collation_req_receiver,
-			collation_req_vstaging_receiver,
+			collation_req_v2_receiver,
 			Default::default(),
 			reputation,
 			REPUTATION_CHANGE_TEST_INTERVAL,
@@ -253,7 +253,7 @@ fn test_harness<T: Future<Output = TestHarness>>(
 		.unwrap();
 	};
 
-	let test_fut = test(TestHarness { virtual_overseer, req_v1_cfg, req_vstaging_cfg });
+	let test_fut = test(TestHarness { virtual_overseer, req_v1_cfg, req_v2_cfg });
 
 	futures::pin_mut!(test_fut);
 	futures::pin_mut!(subsystem);
@@ -330,7 +330,7 @@ async fn setup_system(virtual_overseer: &mut VirtualOverseer, test_state: &TestS
 		overseer_recv(virtual_overseer).await,
 		AllMessages::RuntimeApi(RuntimeApiMessage::Request(
 			relay_parent,
-			RuntimeApiRequest::StagingAsyncBackingParams(tx)
+			RuntimeApiRequest::AsyncBackingParams(tx)
 		)) => {
 			assert_eq!(relay_parent, test_state.relay_parent);
 			tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap();
@@ -545,7 +545,7 @@ async fn expect_declare_msg(
 
 /// Check that the next received message is a collation advertisement message.
 ///
-/// Expects vstaging message if `expected_candidate_hashes` is `Some`, v1 otherwise.
+/// Expects a v2 message if `expected_candidate_hashes` is `Some`, and a v1 message otherwise.
 async fn expect_advertise_collation_msg(
 	virtual_overseer: &mut VirtualOverseer,
 	peer: &PeerId,
@@ -579,13 +579,13 @@ async fn expect_advertise_collation_msg(
 					},
 					(
 						Some(candidate_hashes),
-						Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol(
+						Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol(
 							wire_message,
 						)),
 					) => {
 						assert_matches!(
 							wire_message,
-							protocol_vstaging::CollatorProtocolMessage::AdvertiseCollation {
+							protocol_v2::CollatorProtocolMessage::AdvertiseCollation {
 								relay_parent,
 								candidate_hash,
 								..
@@ -634,7 +634,7 @@ fn advertise_and_send_collation() {
 		|test_harness| async move {
 			let mut virtual_overseer = test_harness.virtual_overseer;
 			let mut req_v1_cfg = test_harness.req_v1_cfg;
-			let req_vstaging_cfg = test_harness.req_vstaging_cfg;
+			let req_v2_cfg = test_harness.req_v2_cfg;
 
 			setup_system(&mut virtual_overseer, &test_state).await;
 
@@ -789,7 +789,7 @@ fn advertise_and_send_collation() {
 				None,
 			)
 			.await;
-			TestHarness { virtual_overseer, req_v1_cfg, req_vstaging_cfg }
+			TestHarness { virtual_overseer, req_v1_cfg, req_v2_cfg }
 		},
 	);
 }
@@ -807,7 +807,7 @@ fn delay_reputation_change() {
 		|test_harness| async move {
 			let mut virtual_overseer = test_harness.virtual_overseer;
 			let mut req_v1_cfg = test_harness.req_v1_cfg;
-			let req_vstaging_cfg = test_harness.req_vstaging_cfg;
+			let req_v2_cfg = test_harness.req_v2_cfg;
 
 			setup_system(&mut virtual_overseer, &test_state).await;
 
@@ -903,15 +903,15 @@ fn delay_reputation_change() {
 				);
 			}
 
-			TestHarness { virtual_overseer, req_v1_cfg, req_vstaging_cfg }
+			TestHarness { virtual_overseer, req_v1_cfg, req_v2_cfg }
 		},
 	);
 }
 
-/// Tests that collator side works with vstaging network protocol
+/// Tests that the collator side works with the v2 network protocol
 /// before async backing is enabled.
 #[test]
-fn advertise_collation_vstaging_protocol() {
+fn advertise_collation_v2_protocol() {
 	let test_state = TestState::default();
 	let local_peer_id = test_state.local_peer_id;
 	let collator_pair = test_state.collator_pair.clone();
@@ -941,21 +941,16 @@ fn advertise_collation_vstaging_protocol() {
 				Some(validators[0].clone()),
 			)
 			.await;
-			// The rest with vstaging.
+			// Connect the remaining validators with the v2 protocol.
 			for (val, peer) in validators.iter().zip(peer_ids.iter()).skip(1) {
-				connect_peer(
-					virtual_overseer,
-					*peer,
-					CollationVersion::VStaging,
-					Some(val.clone()),
-				)
-				.await;
+				connect_peer(virtual_overseer, *peer, CollationVersion::V2, Some(val.clone()))
+					.await;
 			}
 
 			// Declare messages.
 			expect_declare_msg(virtual_overseer, &test_state, &peer_ids[0]).await;
 			for peer_id in peer_ids.iter().skip(1) {
-				prospective_parachains::expect_declare_msg_vstaging(
+				prospective_parachains::expect_declare_msg_v2(
 					virtual_overseer,
 					&test_state,
 					&peer_id,
@@ -981,7 +976,7 @@ fn advertise_collation_vstaging_protocol() {
 					virtual_overseer,
 					peer_id,
 					test_state.relay_parent,
-					Some(vec![candidate.hash()]), // This is `Some`, advertisement is vstaging.
+					Some(vec![candidate.hash()]), // This is `Some`, advertisement is v2.
 				)
 				.await;
 			}
@@ -1405,7 +1400,7 @@ fn connect_to_buffered_groups() {
 		|test_harness| async move {
 			let mut virtual_overseer = test_harness.virtual_overseer;
 			let mut req_cfg = test_harness.req_v1_cfg;
-			let req_vstaging_cfg = test_harness.req_vstaging_cfg;
+			let req_v2_cfg = test_harness.req_v2_cfg;
 
 			setup_system(&mut virtual_overseer, &test_state).await;
 
@@ -1510,7 +1505,7 @@ fn connect_to_buffered_groups() {
 				}
 			);
 
-			TestHarness { virtual_overseer, req_v1_cfg: req_cfg, req_vstaging_cfg }
+			TestHarness { virtual_overseer, req_v1_cfg: req_cfg, req_v2_cfg }
 		},
 	);
 }
diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs
index bd55c35852f..fd9d7a746eb 100644
--- a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs
+++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs
@@ -19,10 +19,10 @@
 use super::*;
 
 use polkadot_node_subsystem::messages::{ChainApiMessage, ProspectiveParachainsMessage};
-use polkadot_primitives::{vstaging as vstaging_primitives, Header, OccupiedCore};
+use polkadot_primitives::{AsyncBackingParams, Header, OccupiedCore};
 
-const ASYNC_BACKING_PARAMETERS: vstaging_primitives::AsyncBackingParams =
-	vstaging_primitives::AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 };
+const ASYNC_BACKING_PARAMETERS: AsyncBackingParams =
+	AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 };
 
 fn get_parent_hash(hash: Hash) -> Hash {
 	Hash::from_low_u64_be(hash.to_low_u64_be() + 1)
@@ -52,7 +52,7 @@ async fn update_view(
 			overseer_recv(virtual_overseer).await,
 			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
 				parent,
-				RuntimeApiRequest::StagingAsyncBackingParams(tx),
+				RuntimeApiRequest::AsyncBackingParams(tx),
 			)) => {
 				tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap();
 				(parent, new_view.get(&parent).copied().expect("Unknown parent requested"))
@@ -124,7 +124,7 @@ async fn update_view(
 }
 
 /// Check that the next received message is a `Declare` message.
-pub(super) async fn expect_declare_msg_vstaging(
+pub(super) async fn expect_declare_msg_v2(
 	virtual_overseer: &mut VirtualOverseer,
 	test_state: &TestState,
 	peer: &PeerId,
@@ -133,20 +133,20 @@ pub(super) async fn expect_declare_msg_vstaging(
 		overseer_recv(virtual_overseer).await,
 		AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendCollationMessage(
 			to,
-			Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol(
+			Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol(
 				wire_message,
 			)),
 		)) => {
 			assert_eq!(to[0], *peer);
 			assert_matches!(
 				wire_message,
-				protocol_vstaging::CollatorProtocolMessage::Declare(
+				protocol_v2::CollatorProtocolMessage::Declare(
 					collator_id,
 					para_id,
 					signature,
 				) => {
 					assert!(signature.verify(
-						&*protocol_vstaging::declare_signature_payload(&test_state.local_peer_id),
+						&*protocol_v2::declare_signature_payload(&test_state.local_peer_id),
 						&collator_id),
 					);
 					assert_eq!(collator_id, test_state.collator_pair.public());
@@ -203,13 +203,12 @@ fn distribute_collation_from_implicit_view() {
 				.into_iter()
 				.zip(validator_peer_ids.clone())
 			{
-				connect_peer(virtual_overseer, peer, CollationVersion::VStaging, Some(val.clone()))
-					.await;
+				connect_peer(virtual_overseer, peer, CollationVersion::V2, Some(val.clone())).await;
 			}
 
 			// Collator declared itself to each peer.
 			for peer_id in &validator_peer_ids {
-				expect_declare_msg_vstaging(virtual_overseer, &test_state, peer_id).await;
+				expect_declare_msg_v2(virtual_overseer, &test_state, peer_id).await;
 			}
 
 			let pov = PoV { block_data: BlockData(vec![1, 2, 3]) };
@@ -386,7 +385,7 @@ fn advertise_and_send_collation_by_hash() {
 		|test_harness| async move {
 			let mut virtual_overseer = test_harness.virtual_overseer;
 			let req_v1_cfg = test_harness.req_v1_cfg;
-			let mut req_vstaging_cfg = test_harness.req_vstaging_cfg;
+			let mut req_v2_cfg = test_harness.req_v2_cfg;
 
 			let head_a = Hash::from_low_u64_be(128);
 			let head_a_num: u32 = 64;
@@ -435,11 +434,11 @@ fn advertise_and_send_collation_by_hash() {
 			connect_peer(
 				&mut virtual_overseer,
 				peer,
-				CollationVersion::VStaging,
+				CollationVersion::V2,
 				Some(validator_id.clone()),
 			)
 			.await;
-			expect_declare_msg_vstaging(&mut virtual_overseer, &test_state, &peer).await;
+			expect_declare_msg_v2(&mut virtual_overseer, &test_state, &peer).await;
 
 			// Head `b` is not a leaf, but both advertisements are still relevant.
 			send_peer_view_change(&mut virtual_overseer, &peer, vec![head_b]).await;
@@ -449,13 +448,13 @@ fn advertise_and_send_collation_by_hash() {
 
 			for (candidate, pov_block) in candidates {
 				let (pending_response, rx) = oneshot::channel();
-				req_vstaging_cfg
+				req_v2_cfg
 					.inbound_queue
 					.as_mut()
 					.unwrap()
 					.send(RawIncomingRequest {
 						peer,
-						payload: request_vstaging::CollationFetchingRequest {
+						payload: request_v2::CollationFetchingRequest {
 							relay_parent: head_b,
 							para_id: test_state.para_id,
 							candidate_hash: candidate.hash(),
@@ -469,7 +468,7 @@ fn advertise_and_send_collation_by_hash() {
 				assert_matches!(
 					rx.await,
 					Ok(full_response) => {
-						// Response is the same for vstaging.
+						// Response is the same for v2.
 						let request_v1::CollationFetchingResponse::Collation(receipt, pov): request_v1::CollationFetchingResponse
 							= request_v1::CollationFetchingResponse::decode(
 								&mut full_response.result
@@ -482,7 +481,7 @@ fn advertise_and_send_collation_by_hash() {
 				);
 			}
 
-			TestHarness { virtual_overseer, req_v1_cfg, req_vstaging_cfg }
+			TestHarness { virtual_overseer, req_v1_cfg, req_v2_cfg }
 		},
 	)
 }
@@ -552,11 +551,11 @@ fn advertise_core_occupied() {
 			connect_peer(
 				virtual_overseer,
 				peer_ids[0],
-				CollationVersion::VStaging,
+				CollationVersion::V2,
 				Some(validators[0].clone()),
 			)
 			.await;
-			expect_declare_msg_vstaging(virtual_overseer, &test_state, &peer_ids[0]).await;
+			expect_declare_msg_v2(virtual_overseer, &test_state, &peer_ids[0]).await;
 			// Peer is aware of the leaf.
 			send_peer_view_change(virtual_overseer, &peer_ids[0], vec![head_a]).await;
 
diff --git a/polkadot/node/network/collator-protocol/src/lib.rs b/polkadot/node/network/collator-protocol/src/lib.rs
index 62c033954f7..1edc6766417 100644
--- a/polkadot/node/network/collator-protocol/src/lib.rs
+++ b/polkadot/node/network/collator-protocol/src/lib.rs
@@ -32,7 +32,7 @@ use polkadot_node_subsystem_util::reputation::ReputationAggregator;
 use sp_keystore::KeystorePtr;
 
 use polkadot_node_network_protocol::{
-	request_response::{v1 as request_v1, vstaging as protocol_vstaging, IncomingRequestReceiver},
+	request_response::{v1 as request_v1, v2 as protocol_v2, IncomingRequestReceiver},
 	PeerId, UnifiedReputationChange as Rep,
 };
 use polkadot_primitives::CollatorPair;
@@ -83,9 +83,8 @@ pub enum ProtocolSide {
 		collator_pair: CollatorPair,
 		/// Receiver for v1 collation fetching requests.
 		request_receiver_v1: IncomingRequestReceiver<request_v1::CollationFetchingRequest>,
-		/// Receiver for vstaging collation fetching requests.
-		request_receiver_vstaging:
-			IncomingRequestReceiver<protocol_vstaging::CollationFetchingRequest>,
+		/// Receiver for v2 collation fetching requests.
+		request_receiver_v2: IncomingRequestReceiver<protocol_v2::CollationFetchingRequest>,
 		/// Metrics.
 		metrics: collator_side::Metrics,
 	},
@@ -121,14 +120,14 @@ impl<Context> CollatorProtocolSubsystem {
 				peer_id,
 				collator_pair,
 				request_receiver_v1,
-				request_receiver_vstaging,
+				request_receiver_v2,
 				metrics,
 			} => collator_side::run(
 				ctx,
 				peer_id,
 				collator_pair,
 				request_receiver_v1,
-				request_receiver_vstaging,
+				request_receiver_v2,
 				metrics,
 			)
 			.map_err(|e| SubsystemError::with_origin("collator-protocol", e))
diff --git a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs
index 4c92780f2da..a53e0028b9e 100644
--- a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs
+++ b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs
@@ -119,7 +119,7 @@ impl PendingCollation {
 	}
 }
 
-/// vstaging advertisement that was rejected by the backing
+/// A v2 advertisement that was rejected by the backing
 /// subsystem. Validator may fetch it later if its fragment
 /// membership gets recognized before relay parent goes out of view.
 #[derive(Debug, Clone)]
diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
index e8cf769d2e5..fcb408d54b1 100644
--- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
@@ -34,10 +34,10 @@ use polkadot_node_network_protocol::{
 	peer_set::{CollationVersion, PeerSet},
 	request_response::{
 		outgoing::{Recipient, RequestError},
-		v1 as request_v1, vstaging as request_vstaging, OutgoingRequest, Requests,
+		v1 as request_v1, v2 as request_v2, OutgoingRequest, Requests,
 	},
-	v1 as protocol_v1, vstaging as protocol_vstaging, OurView, PeerId,
-	UnifiedReputationChange as Rep, Versioned, View,
+	v1 as protocol_v1, v2 as protocol_v2, OurView, PeerId, UnifiedReputationChange as Rep,
+	Versioned, View,
 };
 use polkadot_node_primitives::{SignedFullStatement, Statement};
 use polkadot_node_subsystem::{
@@ -624,13 +624,9 @@ async fn notify_collation_seconded(
 		CollationVersion::V1 => Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol(
 			protocol_v1::CollatorProtocolMessage::CollationSeconded(relay_parent, statement),
 		)),
-		CollationVersion::VStaging =>
-			Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol(
-				protocol_vstaging::CollatorProtocolMessage::CollationSeconded(
-					relay_parent,
-					statement,
-				),
-			)),
+		CollationVersion::V2 => Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol(
+			protocol_v2::CollatorProtocolMessage::CollationSeconded(relay_parent, statement),
+		)),
 	};
 	sender
 		.send_message(NetworkBridgeTxMessage::SendCollationMessage(vec![peer_id], wire_message))
@@ -694,16 +690,12 @@ async fn request_collation(
 			let requests = Requests::CollationFetchingV1(req);
 			(requests, response_recv.boxed())
 		},
-		(CollationVersion::VStaging, Some(ProspectiveCandidate { candidate_hash, .. })) => {
+		(CollationVersion::V2, Some(ProspectiveCandidate { candidate_hash, .. })) => {
 			let (req, response_recv) = OutgoingRequest::new(
 				Recipient::Peer(peer_id),
-				request_vstaging::CollationFetchingRequest {
-					relay_parent,
-					para_id,
-					candidate_hash,
-				},
+				request_v2::CollationFetchingRequest { relay_parent, para_id, candidate_hash },
 			);
-			let requests = Requests::CollationFetchingVStaging(req);
+			let requests = Requests::CollationFetchingV2(req);
 			(requests, response_recv.boxed())
 		},
 		_ => return Err(FetchError::ProtocolMismatch),
@@ -758,18 +750,15 @@ async fn process_incoming_peer_message<Context>(
 	ctx: &mut Context,
 	state: &mut State,
 	origin: PeerId,
-	msg: Versioned<
-		protocol_v1::CollatorProtocolMessage,
-		protocol_vstaging::CollatorProtocolMessage,
-	>,
+	msg: Versioned<protocol_v1::CollatorProtocolMessage, protocol_v2::CollatorProtocolMessage>,
 ) {
 	use protocol_v1::CollatorProtocolMessage as V1;
-	use protocol_vstaging::CollatorProtocolMessage as VStaging;
+	use protocol_v2::CollatorProtocolMessage as V2;
 	use sp_runtime::traits::AppVerify;
 
 	match msg {
 		Versioned::V1(V1::Declare(collator_id, para_id, signature)) |
-		Versioned::VStaging(VStaging::Declare(collator_id, para_id, signature)) => {
+		Versioned::V2(V2::Declare(collator_id, para_id, signature)) => {
 			if collator_peer_id(&state.peer_data, &collator_id).is_some() {
 				modify_reputation(
 					&mut state.reputation,
@@ -881,7 +870,7 @@ async fn process_incoming_peer_message<Context>(
 					modify_reputation(&mut state.reputation, ctx.sender(), origin, rep).await;
 				}
 			},
-		Versioned::VStaging(VStaging::AdvertiseCollation {
+		Versioned::V2(V2::AdvertiseCollation {
 			relay_parent,
 			candidate_hash,
 			parent_head_data_hash,
@@ -901,15 +890,14 @@ async fn process_incoming_peer_message<Context>(
 					?relay_parent,
 					?candidate_hash,
 					error = ?err,
-					"Rejected vstaging advertisement",
+					"Rejected v2 advertisement",
 				);
 
 				if let Some(rep) = err.reputation_changes() {
 					modify_reputation(&mut state.reputation, ctx.sender(), origin, rep).await;
 				}
 			},
-		Versioned::V1(V1::CollationSeconded(..)) |
-		Versioned::VStaging(VStaging::CollationSeconded(..)) => {
+		Versioned::V1(V1::CollationSeconded(..)) | Versioned::V2(V2::CollationSeconded(..)) => {
 			gum::warn!(
 				target: LOG_TARGET,
 				peer_id = ?origin,
@@ -1074,7 +1062,7 @@ where
 	};
 
 	if relay_parent_mode.is_enabled() && prospective_candidate.is_none() {
-		// Expected vstaging advertisement.
+		// Expected v2 advertisement.
 		return Err(AdvertisementError::ProtocolMismatch)
 	}
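`request_collation` above chooses the request flavour from the peer's negotiated collation-protocol version and treats a v2 fetch without a prospective candidate hash as a protocol mismatch. A simplified sketch of that decision, with plain integers standing in for hashes and ids:

#[derive(Debug)]
enum CollationVersion {
	V1,
	V2,
}

#[derive(Debug)]
enum Requests {
	CollationFetchingV1 { relay_parent: u64, para_id: u32 },
	CollationFetchingV2 { relay_parent: u64, para_id: u32, candidate_hash: u64 },
}

#[derive(Debug)]
enum FetchError {
	ProtocolMismatch,
}

fn request_collation(
	version: CollationVersion,
	relay_parent: u64,
	para_id: u32,
	prospective_candidate_hash: Option<u64>,
) -> Result<Requests, FetchError> {
	match (version, prospective_candidate_hash) {
		// v1 fetches are addressed by relay parent and para id only.
		(CollationVersion::V1, _) =>
			Ok(Requests::CollationFetchingV1 { relay_parent, para_id }),
		// v2 fetches additionally pin down the exact candidate.
		(CollationVersion::V2, Some(candidate_hash)) =>
			Ok(Requests::CollationFetchingV2 { relay_parent, para_id, candidate_hash }),
		// A v2 peer without a prospective candidate is a protocol mismatch.
		_ => Err(FetchError::ProtocolMismatch),
	}
}

fn main() {
	println!("{:?}", request_collation(CollationVersion::V1, 10, 2000, None));
	println!("{:?}", request_collation(CollationVersion::V2, 10, 2000, Some(42)));
	println!("{:?}", request_collation(CollationVersion::V2, 10, 2000, None));
}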
 
diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs
index 1cb656e325d..9812998aab7 100644
--- a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs
@@ -357,7 +357,7 @@ async fn assert_fetch_collation_request(
 			),
 			Some(candidate_hash) => assert_matches!(
 				req,
-				Requests::CollationFetchingVStaging(req) => {
+				Requests::CollationFetchingV2(req) => {
 					let payload = req.payload;
 					assert_eq!(payload.relay_parent, relay_parent);
 					assert_eq!(payload.para_id, para_id);
@@ -394,12 +394,11 @@ async fn connect_and_declare_collator(
 			para_id,
 			collator.sign(&protocol_v1::declare_signature_payload(&peer)),
 		)),
-		CollationVersion::VStaging =>
-			Versioned::VStaging(protocol_vstaging::CollatorProtocolMessage::Declare(
-				collator.public(),
-				para_id,
-				collator.sign(&protocol_v1::declare_signature_payload(&peer)),
-			)),
+		CollationVersion::V2 => Versioned::V2(protocol_v2::CollatorProtocolMessage::Declare(
+			collator.public(),
+			para_id,
+			collator.sign(&protocol_v1::declare_signature_payload(&peer)),
+		)),
 	};
 
 	overseer_send(
@@ -421,7 +420,7 @@ async fn advertise_collation(
 ) {
 	let wire_message = match candidate {
 		Some((candidate_hash, parent_head_data_hash)) =>
-			Versioned::VStaging(protocol_vstaging::CollatorProtocolMessage::AdvertiseCollation {
+			Versioned::V2(protocol_v2::CollatorProtocolMessage::AdvertiseCollation {
 				relay_parent,
 				candidate_hash,
 				parent_head_data_hash,
@@ -444,7 +443,7 @@ async fn assert_async_backing_params_request(virtual_overseer: &mut VirtualOvers
 		overseer_recv(virtual_overseer).await,
 		AllMessages::RuntimeApi(RuntimeApiMessage::Request(
 			relay_parent,
-			RuntimeApiRequest::StagingAsyncBackingParams(tx)
+			RuntimeApiRequest::AsyncBackingParams(tx)
 		)) => {
 			assert_eq!(relay_parent, hash);
 			tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap();
@@ -499,10 +498,10 @@ fn act_on_advertisement() {
 	});
 }
 
-/// Tests that validator side works with vstaging network protocol
+/// Tests that the validator side works with the v2 network protocol
 /// before async backing is enabled.
 #[test]
-fn act_on_advertisement_vstaging() {
+fn act_on_advertisement_v2() {
 	let test_state = TestState::default();
 
 	test_harness(ReputationAggregator::new(|_| true), |test_harness| async move {
@@ -529,13 +528,13 @@ fn act_on_advertisement_vstaging() {
 			peer_b,
 			pair.clone(),
 			test_state.chain_ids[0],
-			CollationVersion::VStaging,
+			CollationVersion::V2,
 		)
 		.await;
 
 		let candidate_hash = CandidateHash::default();
 		let parent_head_data_hash = Hash::zero();
-		// vstaging advertisement.
+		// v2 advertisement.
 		advertise_collation(
 			&mut virtual_overseer,
 			peer_b,
diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs
index e2a007b308e..4da0f11da39 100644
--- a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs
+++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs
@@ -20,12 +20,12 @@ use super::*;
 
 use polkadot_node_subsystem::messages::ChainApiMessage;
 use polkadot_primitives::{
-	vstaging as vstaging_primitives, BlockNumber, CandidateCommitments, CommittedCandidateReceipt,
-	Header, SigningContext, ValidatorId,
+	AsyncBackingParams, BlockNumber, CandidateCommitments, CommittedCandidateReceipt, Header,
+	SigningContext, ValidatorId,
 };
 
-const ASYNC_BACKING_PARAMETERS: vstaging_primitives::AsyncBackingParams =
-	vstaging_primitives::AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 };
+const ASYNC_BACKING_PARAMETERS: AsyncBackingParams =
+	AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 };
 
 fn get_parent_hash(hash: Hash) -> Hash {
 	Hash::from_low_u64_be(hash.to_low_u64_be() + 1)
@@ -97,7 +97,7 @@ async fn update_view(
 			overseer_recv(virtual_overseer).await,
 			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
 				parent,
-				RuntimeApiRequest::StagingAsyncBackingParams(tx),
+				RuntimeApiRequest::AsyncBackingParams(tx),
 			)) => {
 				tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap();
 				(parent, new_view.get(&parent).copied().expect("Unknown parent requested"))
@@ -226,8 +226,8 @@ async fn assert_collation_seconded(
 		overseer_recv(virtual_overseer).await,
 		AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendCollationMessage(
 			peers,
-			Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol(
-				protocol_vstaging::CollatorProtocolMessage::CollationSeconded(
+			Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol(
+				protocol_v2::CollatorProtocolMessage::CollationSeconded(
 					_relay_parent,
 					..,
 				),
@@ -306,7 +306,7 @@ fn accept_advertisements_from_implicit_view() {
 			peer_a,
 			pair_a.clone(),
 			test_state.chain_ids[0],
-			CollationVersion::VStaging,
+			CollationVersion::V2,
 		)
 		.await;
 		connect_and_declare_collator(
@@ -314,7 +314,7 @@ fn accept_advertisements_from_implicit_view() {
 			peer_b,
 			pair_b.clone(),
 			test_state.chain_ids[1],
-			CollationVersion::VStaging,
+			CollationVersion::V2,
 		)
 		.await;
 
@@ -406,7 +406,7 @@ fn second_multiple_candidates_per_relay_parent() {
 			peer_a,
 			pair.clone(),
 			test_state.chain_ids[0],
-			CollationVersion::VStaging,
+			CollationVersion::V2,
 		)
 		.await;
 
@@ -457,7 +457,7 @@ fn second_multiple_candidates_per_relay_parent() {
 			let pov = PoV { block_data: BlockData(vec![1]) };
 
 			response_channel
-				.send(Ok(request_vstaging::CollationFetchingResponse::Collation(
+				.send(Ok(request_v2::CollationFetchingResponse::Collation(
 					candidate.clone(),
 					pov.clone(),
 				)
@@ -514,7 +514,7 @@ fn second_multiple_candidates_per_relay_parent() {
 			peer_b,
 			pair_b.clone(),
 			test_state.chain_ids[0],
-			CollationVersion::VStaging,
+			CollationVersion::V2,
 		)
 		.await;
 
@@ -562,7 +562,7 @@ fn fetched_collation_sanity_check() {
 			peer_a,
 			pair.clone(),
 			test_state.chain_ids[0],
-			CollationVersion::VStaging,
+			CollationVersion::V2,
 		)
 		.await;
 
@@ -611,7 +611,7 @@ fn fetched_collation_sanity_check() {
 		let pov = PoV { block_data: BlockData(vec![1]) };
 
 		response_channel
-			.send(Ok(request_vstaging::CollationFetchingResponse::Collation(
+			.send(Ok(request_v2::CollationFetchingResponse::Collation(
 				candidate.clone(),
 				pov.clone(),
 			)
@@ -668,7 +668,7 @@ fn advertisement_spam_protection() {
 			peer_a,
 			pair_a.clone(),
 			test_state.chain_ids[1],
-			CollationVersion::VStaging,
+			CollationVersion::V2,
 		)
 		.await;
 
@@ -748,7 +748,7 @@ fn backed_candidate_unblocks_advertisements() {
 			peer_a,
 			pair_a.clone(),
 			test_state.chain_ids[0],
-			CollationVersion::VStaging,
+			CollationVersion::V2,
 		)
 		.await;
 		connect_and_declare_collator(
@@ -756,7 +756,7 @@ fn backed_candidate_unblocks_advertisements() {
 			peer_b,
 			pair_b.clone(),
 			test_state.chain_ids[1],
-			CollationVersion::VStaging,
+			CollationVersion::V2,
 		)
 		.await;
 
@@ -856,7 +856,7 @@ fn active_leave_unblocks_advertisements() {
 				*peer_id,
 				peer.clone(),
 				test_state.chain_ids[0],
-				CollationVersion::VStaging,
+				CollationVersion::V2,
 			)
 			.await;
 		}
diff --git a/polkadot/node/network/gossip-support/src/lib.rs b/polkadot/node/network/gossip-support/src/lib.rs
index c5dc1ba14bd..4fa23507e86 100644
--- a/polkadot/node/network/gossip-support/src/lib.rs
+++ b/polkadot/node/network/gossip-support/src/lib.rs
@@ -452,7 +452,7 @@ where
 				// match void -> LLVM unreachable
 				match message {
 					Versioned::V1(m) => match m {},
-					Versioned::VStaging(m) => match m {},
+					Versioned::V2(m) => match m {},
 				}
 			},
 		}
diff --git a/polkadot/node/network/protocol/Cargo.toml b/polkadot/node/network/protocol/Cargo.toml
index c33b9eae325..379334ded24 100644
--- a/polkadot/node/network/protocol/Cargo.toml
+++ b/polkadot/node/network/protocol/Cargo.toml
@@ -27,6 +27,3 @@ bitvec = "1"
 
 [dev-dependencies]
 rand_chacha = "0.3.1"
-
-[features]
-network-protocol-staging = []
diff --git a/polkadot/node/network/protocol/src/lib.rs b/polkadot/node/network/protocol/src/lib.rs
index 1bed2c12fe2..901ac99b669 100644
--- a/polkadot/node/network/protocol/src/lib.rs
+++ b/polkadot/node/network/protocol/src/lib.rs
@@ -253,26 +253,25 @@ impl View {
 
 /// A protocol-versioned type.
 #[derive(Debug, Clone, PartialEq, Eq)]
-pub enum Versioned<V1, VStaging> {
+pub enum Versioned<V1, V2> {
 	/// V1 type.
 	V1(V1),
-	/// VStaging type.
-	VStaging(VStaging),
+	/// V2 type.
+	V2(V2),
 }
 
-impl<V1: Clone, VStaging: Clone> Versioned<&'_ V1, &'_ VStaging> {
+impl<V1: Clone, V2: Clone> Versioned<&'_ V1, &'_ V2> {
 	/// Convert to a fully-owned version of the message.
-	pub fn clone_inner(&self) -> Versioned<V1, VStaging> {
+	pub fn clone_inner(&self) -> Versioned<V1, V2> {
 		match *self {
 			Versioned::V1(inner) => Versioned::V1(inner.clone()),
-			Versioned::VStaging(inner) => Versioned::VStaging(inner.clone()),
+			Versioned::V2(inner) => Versioned::V2(inner.clone()),
 		}
 	}
 }
 
 /// All supported versions of the validation protocol message.
-pub type VersionedValidationProtocol =
-	Versioned<v1::ValidationProtocol, vstaging::ValidationProtocol>;
+pub type VersionedValidationProtocol = Versioned<v1::ValidationProtocol, v2::ValidationProtocol>;
 
 impl From<v1::ValidationProtocol> for VersionedValidationProtocol {
 	fn from(v1: v1::ValidationProtocol) -> Self {
@@ -280,14 +279,14 @@ impl From<v1::ValidationProtocol> for VersionedValidationProtocol {
 	}
 }
 
-impl From<vstaging::ValidationProtocol> for VersionedValidationProtocol {
-	fn from(vstaging: vstaging::ValidationProtocol) -> Self {
-		VersionedValidationProtocol::VStaging(vstaging)
+impl From<v2::ValidationProtocol> for VersionedValidationProtocol {
+	fn from(v2: v2::ValidationProtocol) -> Self {
+		VersionedValidationProtocol::V2(v2)
 	}
 }
 
 /// All supported versions of the collation protocol message.
-pub type VersionedCollationProtocol = Versioned<v1::CollationProtocol, vstaging::CollationProtocol>;
+pub type VersionedCollationProtocol = Versioned<v1::CollationProtocol, v2::CollationProtocol>;
 
 impl From<v1::CollationProtocol> for VersionedCollationProtocol {
 	fn from(v1: v1::CollationProtocol) -> Self {
@@ -295,9 +294,9 @@ impl From<v1::CollationProtocol> for VersionedCollationProtocol {
 	}
 }
 
-impl From<vstaging::CollationProtocol> for VersionedCollationProtocol {
-	fn from(vstaging: vstaging::CollationProtocol) -> Self {
-		VersionedCollationProtocol::VStaging(vstaging)
+impl From<v2::CollationProtocol> for VersionedCollationProtocol {
+	fn from(v2: v2::CollationProtocol) -> Self {
+		VersionedCollationProtocol::V2(v2)
 	}
 }
 
@@ -307,7 +306,7 @@ macro_rules! impl_versioned_full_protocol_from {
 			fn from(versioned_from: $from) -> $out {
 				match versioned_from {
 					Versioned::V1(x) => Versioned::V1(x.into()),
-					Versioned::VStaging(x) => Versioned::VStaging(x.into()),
+					Versioned::V2(x) => Versioned::V2(x.into()),
 				}
 			}
 		}
@@ -321,7 +320,7 @@ macro_rules! impl_versioned_try_from {
 		$from:ty,
 		$out:ty,
 		$v1_pat:pat => $v1_out:expr,
-		$vstaging_pat:pat => $vstaging_out:expr
+		$v2_pat:pat => $v2_out:expr
 	) => {
 		impl TryFrom<$from> for $out {
 			type Error = crate::WrongVariant;
@@ -330,7 +329,7 @@ macro_rules! impl_versioned_try_from {
 				#[allow(unreachable_patterns)] // when there is only one variant
 				match x {
 					Versioned::V1($v1_pat) => Ok(Versioned::V1($v1_out)),
-					Versioned::VStaging($vstaging_pat) => Ok(Versioned::VStaging($vstaging_out)),
+					Versioned::V2($v2_pat) => Ok(Versioned::V2($v2_out)),
 					_ => Err(crate::WrongVariant),
 				}
 			}
@@ -343,8 +342,7 @@ macro_rules! impl_versioned_try_from {
 				#[allow(unreachable_patterns)] // when there is only one variant
 				match x {
 					Versioned::V1($v1_pat) => Ok(Versioned::V1($v1_out.clone())),
-					Versioned::VStaging($vstaging_pat) =>
-						Ok(Versioned::VStaging($vstaging_out.clone())),
+					Versioned::V2($v2_pat) => Ok(Versioned::V2($v2_out.clone())),
 					_ => Err(crate::WrongVariant),
 				}
 			}
@@ -354,7 +352,7 @@ macro_rules! impl_versioned_try_from {
 
 /// Version-annotated messages used by the bitfield distribution subsystem.
 pub type BitfieldDistributionMessage =
-	Versioned<v1::BitfieldDistributionMessage, vstaging::BitfieldDistributionMessage>;
+	Versioned<v1::BitfieldDistributionMessage, v2::BitfieldDistributionMessage>;
 impl_versioned_full_protocol_from!(
 	BitfieldDistributionMessage,
 	VersionedValidationProtocol,
@@ -364,12 +362,12 @@ impl_versioned_try_from!(
 	VersionedValidationProtocol,
 	BitfieldDistributionMessage,
 	v1::ValidationProtocol::BitfieldDistribution(x) => x,
-	vstaging::ValidationProtocol::BitfieldDistribution(x) => x
+	v2::ValidationProtocol::BitfieldDistribution(x) => x
 );
 
 /// Version-annotated messages used by the statement distribution subsystem.
 pub type StatementDistributionMessage =
-	Versioned<v1::StatementDistributionMessage, vstaging::StatementDistributionMessage>;
+	Versioned<v1::StatementDistributionMessage, v2::StatementDistributionMessage>;
 impl_versioned_full_protocol_from!(
 	StatementDistributionMessage,
 	VersionedValidationProtocol,
@@ -379,12 +377,12 @@ impl_versioned_try_from!(
 	VersionedValidationProtocol,
 	StatementDistributionMessage,
 	v1::ValidationProtocol::StatementDistribution(x) => x,
-	vstaging::ValidationProtocol::StatementDistribution(x) => x
+	v2::ValidationProtocol::StatementDistribution(x) => x
 );
 
 /// Version-annotated messages used by the approval distribution subsystem.
 pub type ApprovalDistributionMessage =
-	Versioned<v1::ApprovalDistributionMessage, vstaging::ApprovalDistributionMessage>;
+	Versioned<v1::ApprovalDistributionMessage, v2::ApprovalDistributionMessage>;
 impl_versioned_full_protocol_from!(
 	ApprovalDistributionMessage,
 	VersionedValidationProtocol,
@@ -394,13 +392,13 @@ impl_versioned_try_from!(
 	VersionedValidationProtocol,
 	ApprovalDistributionMessage,
 	v1::ValidationProtocol::ApprovalDistribution(x) => x,
-	vstaging::ValidationProtocol::ApprovalDistribution(x) => x
+	v2::ValidationProtocol::ApprovalDistribution(x) => x
 
 );
 
 /// Version-annotated messages used by the gossip-support subsystem (this is void).
 pub type GossipSupportNetworkMessage =
-	Versioned<v1::GossipSupportNetworkMessage, vstaging::GossipSupportNetworkMessage>;
+	Versioned<v1::GossipSupportNetworkMessage, v2::GossipSupportNetworkMessage>;
 // This is a void enum placeholder, so never gets sent over the wire.
 impl TryFrom<VersionedValidationProtocol> for GossipSupportNetworkMessage {
 	type Error = WrongVariant;
@@ -418,7 +416,7 @@ impl<'a> TryFrom<&'a VersionedValidationProtocol> for GossipSupportNetworkMessag
 
 /// Version-annotated messages used by the collator protocol subsystem.
 pub type CollatorProtocolMessage =
-	Versioned<v1::CollatorProtocolMessage, vstaging::CollatorProtocolMessage>;
+	Versioned<v1::CollatorProtocolMessage, v2::CollatorProtocolMessage>;
 impl_versioned_full_protocol_from!(
 	CollatorProtocolMessage,
 	VersionedCollationProtocol,
@@ -428,7 +426,7 @@ impl_versioned_try_from!(
 	VersionedCollationProtocol,
 	CollatorProtocolMessage,
 	v1::CollationProtocol::CollatorProtocol(x) => x,
-	vstaging::CollationProtocol::CollatorProtocol(x) => x
+	v2::CollationProtocol::CollatorProtocol(x) => x
 );
 
 /// v1 notification protocol types.
@@ -589,12 +587,12 @@ pub mod v1 {
 	}
 }
 
-/// vstaging network protocol types.
-pub mod vstaging {
+/// v2 network protocol types.
+pub mod v2 {
 	use bitvec::{order::Lsb0, slice::BitSlice, vec::BitVec};
 	use parity_scale_codec::{Decode, Encode};
 
-	use polkadot_primitives::vstaging::{
+	use polkadot_primitives::{
 		CandidateHash, CandidateIndex, CollatorId, CollatorSignature, GroupIndex, Hash,
 		Id as ParaId, UncheckedSignedAvailabilityBitfield, UncheckedSignedStatement,
 	};
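For readers less familiar with the macro-generated conversions in protocol/src/lib.rs, the renamed plumbing above boils down to a two-variant wrapper plus per-variant `From`/`TryFrom` impls. Below is a minimal standalone sketch of that shape; `V1Msg`, `V2Msg` and the `String` stand-in for the full protocol enums are hypothetical illustration types, not the real ones.

// Minimal sketch of the versioned-message pattern from protocol/src/lib.rs.
// V1Msg/V2Msg and the String "full protocol" are hypothetical stand-ins.
#[derive(Debug, Clone, PartialEq, Eq)]
enum Versioned<V1, V2> {
	V1(V1),
	V2(V2),
}

#[derive(Debug, Clone, PartialEq, Eq)]
struct V1Msg(String);
#[derive(Debug, Clone, PartialEq, Eq)]
struct V2Msg(String);

// Conceptually what impl_versioned_full_protocol_from! generates: lift a
// per-subsystem message into the full protocol wrapper, variant by variant.
impl From<Versioned<V1Msg, V2Msg>> for Versioned<String, String> {
	fn from(v: Versioned<V1Msg, V2Msg>) -> Self {
		match v {
			Versioned::V1(x) => Versioned::V1(x.0),
			Versioned::V2(x) => Versioned::V2(x.0),
		}
	}
}

fn main() {
	let new = Versioned::V2(V2Msg("current".to_string()));
	let full: Versioned<String, String> = new.into();
	assert_eq!(full, Versioned::V2("current".to_string()));

	let old = Versioned::V1(V1Msg("legacy".to_string()));
	let full_old: Versioned<String, String> = old.into();
	assert_eq!(full_old, Versioned::V1("legacy".to_string()));
}

The `impl_versioned_try_from!` macro in the file above generates the fallible inverse direction (full protocol back to a single subsystem's message) in the same per-variant style.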
diff --git a/polkadot/node/network/protocol/src/peer_set.rs b/polkadot/node/network/protocol/src/peer_set.rs
index c2163783c2c..8dd68b297e3 100644
--- a/polkadot/node/network/protocol/src/peer_set.rs
+++ b/polkadot/node/network/protocol/src/peer_set.rs
@@ -118,16 +118,9 @@ impl PeerSet {
 	/// Networking layer relies on `get_main_version()` being the version
 	/// of the main protocol name reported by [`PeerSetProtocolNames::get_main_name()`].
 	pub fn get_main_version(self) -> ProtocolVersion {
-		#[cfg(not(feature = "network-protocol-staging"))]
 		match self {
-			PeerSet::Validation => ValidationVersion::V1.into(),
-			PeerSet::Collation => CollationVersion::V1.into(),
-		}
-
-		#[cfg(feature = "network-protocol-staging")]
-		match self {
-			PeerSet::Validation => ValidationVersion::VStaging.into(),
-			PeerSet::Collation => CollationVersion::VStaging.into(),
+			PeerSet::Validation => ValidationVersion::V2.into(),
+			PeerSet::Collation => CollationVersion::V2.into(),
 		}
 	}
 
@@ -152,7 +145,7 @@ impl PeerSet {
 			PeerSet::Validation =>
 				if version == ValidationVersion::V1.into() {
 					Some("validation/1")
-				} else if version == ValidationVersion::VStaging.into() {
+				} else if version == ValidationVersion::V2.into() {
 					Some("validation/2")
 				} else {
 					None
@@ -160,7 +153,7 @@ impl PeerSet {
 			PeerSet::Collation =>
 				if version == CollationVersion::V1.into() {
 					Some("collation/1")
-				} else if version == CollationVersion::VStaging.into() {
+				} else if version == CollationVersion::V2.into() {
 					Some("collation/2")
 				} else {
 					None
@@ -223,8 +216,8 @@ impl From<ProtocolVersion> for u32 {
 pub enum ValidationVersion {
 	/// The first version.
 	V1 = 1,
-	/// The staging version.
-	VStaging = 2,
+	/// The second version.
+	V2 = 2,
 }
 
 /// Supported collation protocol versions. Only versions defined here must be used in the codebase.
@@ -232,8 +225,8 @@ pub enum ValidationVersion {
 pub enum CollationVersion {
 	/// The first version.
 	V1 = 1,
-	/// The staging version.
-	VStaging = 2,
+	/// The second version.
+	V2 = 2,
 }
 
 /// Marker indicating the version is unknown.
diff --git a/polkadot/node/network/protocol/src/request_response/mod.rs b/polkadot/node/network/protocol/src/request_response/mod.rs
index baed4b84631..96f7adeb29b 100644
--- a/polkadot/node/network/protocol/src/request_response/mod.rs
+++ b/polkadot/node/network/protocol/src/request_response/mod.rs
@@ -55,7 +55,7 @@ pub use outgoing::{OutgoingRequest, OutgoingResult, Recipient, Requests, Respons
 pub mod v1;
 
 /// Actual versioned requests and responses that are sent over the wire.
-pub mod vstaging;
+pub mod v2;
 
 /// A protocol per subsystem seems to make the most sense, this way we don't need any dispatching
 /// within protocols.
@@ -66,7 +66,7 @@ pub enum Protocol {
 	/// Protocol for fetching collations from collators.
 	CollationFetchingV1,
 	/// Protocol for fetching collations from collators when async backing is enabled.
-	CollationFetchingVStaging,
+	CollationFetchingV2,
 	/// Protocol for fetching seconded PoVs from validators of the same group.
 	PoVFetchingV1,
 	/// Protocol for fetching available data.
@@ -78,7 +78,7 @@ pub enum Protocol {
 
 	/// Protocol for requesting candidates with attestations in statement distribution
 	/// when async backing is enabled.
-	AttestedCandidateVStaging,
+	AttestedCandidateV2,
 }
 
 /// Minimum bandwidth we expect for validators - 500Mbit/s is the recommendation, so approximately
@@ -147,7 +147,7 @@ const POV_RESPONSE_SIZE: u64 = MAX_POV_SIZE as u64 + 10_000;
 /// This is `MAX_CODE_SIZE` plus some additional space for protocol overhead.
 const STATEMENT_RESPONSE_SIZE: u64 = MAX_CODE_SIZE as u64 + 10_000;
 
-/// Maximum response sizes for `AttestedCandidateVStaging`.
+/// Maximum response sizes for `AttestedCandidateV2`.
 ///
 /// This is `MAX_CODE_SIZE` plus some additional space for protocol overhead and
 /// additional backing statements.
@@ -199,7 +199,7 @@ impl Protocol {
 				request_timeout: CHUNK_REQUEST_TIMEOUT,
 				inbound_queue: tx,
 			},
-			Protocol::CollationFetchingV1 | Protocol::CollationFetchingVStaging =>
+			Protocol::CollationFetchingV1 | Protocol::CollationFetchingV2 =>
 				RequestResponseConfig {
 					name,
 					fallback_names,
@@ -254,7 +254,7 @@ impl Protocol {
 				request_timeout: DISPUTE_REQUEST_TIMEOUT,
 				inbound_queue: tx,
 			},
-			Protocol::AttestedCandidateVStaging => RequestResponseConfig {
+			Protocol::AttestedCandidateV2 => RequestResponseConfig {
 				name,
 				fallback_names,
 				max_request_size: 1_000,
@@ -275,7 +275,7 @@ impl Protocol {
 			// as well.
 			Protocol::ChunkFetchingV1 => 100,
 			// 10 seems reasonable, considering group sizes of max 10 validators.
-			Protocol::CollationFetchingV1 | Protocol::CollationFetchingVStaging => 10,
+			Protocol::CollationFetchingV1 | Protocol::CollationFetchingV2 => 10,
 			// 10 seems reasonable, considering group sizes of max 10 validators.
 			Protocol::PoVFetchingV1 => 10,
 			// Validators are constantly self-selecting to request available data which may lead
@@ -307,7 +307,7 @@ impl Protocol {
 			// failure, so having a good value here is mostly about performance tuning.
 			Protocol::DisputeSendingV1 => 100,
 
-			Protocol::AttestedCandidateVStaging => {
+			Protocol::AttestedCandidateV2 => {
 				// We assume we can utilize up to 70% of the available bandwidth for statements.
 				// This is just a guess/estimate, with the following considerations: If we are
 				// faster than that, queue size will stay low anyway, even if not - requesters will
@@ -344,8 +344,8 @@ impl Protocol {
 			Protocol::DisputeSendingV1 => Some("/polkadot/send_dispute/1"),
 
 			// Introduced after legacy names became legacy.
-			Protocol::AttestedCandidateVStaging => None,
-			Protocol::CollationFetchingVStaging => None,
+			Protocol::AttestedCandidateV2 => None,
+			Protocol::CollationFetchingV2 => None,
 		}
 	}
 }
@@ -402,8 +402,8 @@ impl ReqProtocolNames {
 			Protocol::StatementFetchingV1 => "/req_statement/1",
 			Protocol::DisputeSendingV1 => "/send_dispute/1",
 
-			Protocol::CollationFetchingVStaging => "/req_collation/2",
-			Protocol::AttestedCandidateVStaging => "/req_attested_candidate/2",
+			Protocol::CollationFetchingV2 => "/req_collation/2",
+			Protocol::AttestedCandidateV2 => "/req_attested_candidate/2",
 		};
 
 		format!("{}{}", prefix, short_name).into()
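The sizing comments in request_response/mod.rs (a 500 Mbit/s minimum validator bandwidth, of which roughly 70% is assumed usable for attested-candidate responses) lend themselves to a quick back-of-the-envelope check. The sketch below reuses those two figures and adds an assumed ~3 MiB upper bound per response; the response size is an assumption for illustration only, not the crate's actual constant.

// Rough throughput sketch for the AttestedCandidateV2 request/response protocol.
// The bandwidth and 70% figures come from the comments above; the per-response
// size is an assumed illustrative value, not ATTESTED_CANDIDATE_RESPONSE_SIZE.
fn main() {
	let min_bandwidth_bytes_per_s: u64 = 500 * 1_000_000 / 8; // 500 Mbit/s in bytes/s
	let usable_for_statements = min_bandwidth_bytes_per_s * 7 / 10; // assume ~70% usable
	let assumed_response_size: u64 = 3 * 1024 * 1024; // assumed per-response upper bound

	// How many attested-candidate responses per second such a link could serve;
	// a queue sized around this throughput keeps the backlog bounded while
	// requesters wait or retry.
	let responses_per_second = usable_for_statements / assumed_response_size;
	println!("~{responses_per_second} responses/s at the assumed sizes");
}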
diff --git a/polkadot/node/network/protocol/src/request_response/outgoing.rs b/polkadot/node/network/protocol/src/request_response/outgoing.rs
index ddc6b85645b..c613d5778f5 100644
--- a/polkadot/node/network/protocol/src/request_response/outgoing.rs
+++ b/polkadot/node/network/protocol/src/request_response/outgoing.rs
@@ -23,7 +23,7 @@ use sc_network::PeerId;
 
 use polkadot_primitives::AuthorityDiscoveryId;
 
-use super::{v1, vstaging, IsRequest, Protocol};
+use super::{v1, v2, IsRequest, Protocol};
 
 /// All requests that can be sent to the network bridge via `NetworkBridgeTxMessage::SendRequest`.
 #[derive(Debug)]
@@ -42,10 +42,10 @@ pub enum Requests {
 	DisputeSendingV1(OutgoingRequest<v1::DisputeRequest>),
 
 	/// Request a candidate and attestations.
-	AttestedCandidateVStaging(OutgoingRequest<vstaging::AttestedCandidateRequest>),
+	AttestedCandidateV2(OutgoingRequest<v2::AttestedCandidateRequest>),
 	/// Fetch a collation from a collator which previously announced it.
 	/// Compared to V1 it requires specifying which candidate is requested by its hash.
-	CollationFetchingVStaging(OutgoingRequest<vstaging::CollationFetchingRequest>),
+	CollationFetchingV2(OutgoingRequest<v2::CollationFetchingRequest>),
 }
 
 impl Requests {
@@ -54,12 +54,12 @@ impl Requests {
 		match self {
 			Self::ChunkFetchingV1(_) => Protocol::ChunkFetchingV1,
 			Self::CollationFetchingV1(_) => Protocol::CollationFetchingV1,
-			Self::CollationFetchingVStaging(_) => Protocol::CollationFetchingVStaging,
+			Self::CollationFetchingV2(_) => Protocol::CollationFetchingV2,
 			Self::PoVFetchingV1(_) => Protocol::PoVFetchingV1,
 			Self::AvailableDataFetchingV1(_) => Protocol::AvailableDataFetchingV1,
 			Self::StatementFetchingV1(_) => Protocol::StatementFetchingV1,
 			Self::DisputeSendingV1(_) => Protocol::DisputeSendingV1,
-			Self::AttestedCandidateVStaging(_) => Protocol::AttestedCandidateVStaging,
+			Self::AttestedCandidateV2(_) => Protocol::AttestedCandidateV2,
 		}
 	}
 
@@ -74,12 +74,12 @@ impl Requests {
 		match self {
 			Self::ChunkFetchingV1(r) => r.encode_request(),
 			Self::CollationFetchingV1(r) => r.encode_request(),
-			Self::CollationFetchingVStaging(r) => r.encode_request(),
+			Self::CollationFetchingV2(r) => r.encode_request(),
 			Self::PoVFetchingV1(r) => r.encode_request(),
 			Self::AvailableDataFetchingV1(r) => r.encode_request(),
 			Self::StatementFetchingV1(r) => r.encode_request(),
 			Self::DisputeSendingV1(r) => r.encode_request(),
-			Self::AttestedCandidateVStaging(r) => r.encode_request(),
+			Self::AttestedCandidateV2(r) => r.encode_request(),
 		}
 	}
 }
diff --git a/polkadot/node/network/protocol/src/request_response/vstaging.rs b/polkadot/node/network/protocol/src/request_response/v2.rs
similarity index 93%
rename from polkadot/node/network/protocol/src/request_response/vstaging.rs
rename to polkadot/node/network/protocol/src/request_response/v2.rs
index 34a17b4baaa..6b90c579237 100644
--- a/polkadot/node/network/protocol/src/request_response/vstaging.rs
+++ b/polkadot/node/network/protocol/src/request_response/v2.rs
@@ -18,13 +18,13 @@
 
 use parity_scale_codec::{Decode, Encode};
 
-use polkadot_primitives::vstaging::{
+use polkadot_primitives::{
 	CandidateHash, CommittedCandidateReceipt, Hash, Id as ParaId, PersistedValidationData,
 	UncheckedSignedStatement,
 };
 
 use super::{IsRequest, Protocol};
-use crate::vstaging::StatementFilter;
+use crate::v2::StatementFilter;
 
 /// Request a candidate with statements.
 #[derive(Debug, Clone, Encode, Decode)]
@@ -56,7 +56,7 @@ pub struct AttestedCandidateResponse {
 
 impl IsRequest for AttestedCandidateRequest {
 	type Response = AttestedCandidateResponse;
-	const PROTOCOL: Protocol = Protocol::AttestedCandidateVStaging;
+	const PROTOCOL: Protocol = Protocol::AttestedCandidateV2;
 }
 
 /// Responses as sent by collators.
@@ -76,5 +76,5 @@ pub struct CollationFetchingRequest {
 impl IsRequest for CollationFetchingRequest {
 	// The response is the same as for V1.
 	type Response = CollationFetchingResponse;
-	const PROTOCOL: Protocol = Protocol::CollationFetchingVStaging;
+	const PROTOCOL: Protocol = Protocol::CollationFetchingV2;
 }
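The `IsRequest` impls above bind each request type to its response type and wire protocol at compile time. A minimal sketch of that associated-type/associated-const pattern, with hypothetical stand-in types rather than the crate's real ones:

// Sketch of the IsRequest pattern: a request type statically names its response
// type and the protocol it is carried on. All types here are illustrative stand-ins.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Protocol {
	CollationFetchingV2,
	AttestedCandidateV2,
}

trait IsRequest {
	type Response;
	const PROTOCOL: Protocol;
}

struct CollationFetchingRequest;
struct CollationFetchingResponse;

impl IsRequest for CollationFetchingRequest {
	// Tying the response type here lets generic send/receive code pick the
	// right decoder and protocol name without any runtime dispatch.
	type Response = CollationFetchingResponse;
	const PROTOCOL: Protocol = Protocol::CollationFetchingV2;
}

fn main() {
	assert_eq!(CollationFetchingRequest::PROTOCOL, Protocol::CollationFetchingV2);
}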
diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs
index 9ae76047383..fc2aff0da30 100644
--- a/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs
+++ b/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs
@@ -21,8 +21,7 @@ use polkadot_node_network_protocol::{
 	grid_topology::{GridNeighbors, RequiredRouting, SessionBoundGridTopologyStorage},
 	peer_set::{IsAuthority, PeerSet, ValidationVersion},
 	v1::{self as protocol_v1, StatementMetadata},
-	vstaging as protocol_vstaging, IfDisconnected, PeerId, UnifiedReputationChange as Rep,
-	Versioned, View,
+	v2 as protocol_v2, IfDisconnected, PeerId, UnifiedReputationChange as Rep, Versioned, View,
 };
 use polkadot_node_primitives::{
 	SignedFullStatement, Statement, StatementWithPVD, UncheckedSignedFullStatement,
@@ -1062,7 +1061,7 @@ async fn circulate_statement<'a, Context>(
 		"We filter out duplicates above. qed.",
 	);
 
-	let (v1_peers_to_send, vstaging_peers_to_send) = peers_to_send
+	let (v1_peers_to_send, v2_peers_to_send) = peers_to_send
 		.into_iter()
 		.map(|peer_id| {
 			let peer_data =
@@ -1074,7 +1073,7 @@ async fn circulate_statement<'a, Context>(
 		})
 		.partition::<Vec<_>, _>(|(_, _, version)| match version {
 			ValidationVersion::V1 => true,
-			ValidationVersion::VStaging => false,
+			ValidationVersion::V2 => false,
 		}); // partition is handy here but not if we add more protocol versions
 
 	let payload = v1_statement_message(relay_parent, stored.statement.clone(), metrics);
@@ -1094,24 +1093,24 @@ async fn circulate_statement<'a, Context>(
 		))
 		.await;
 	}
-	if !vstaging_peers_to_send.is_empty() {
+	if !v2_peers_to_send.is_empty() {
 		gum::trace!(
 			target: LOG_TARGET,
-			?vstaging_peers_to_send,
+			?v2_peers_to_send,
 			?relay_parent,
 			statement = ?stored.statement,
-			"Sending statement to vstaging peers",
+			"Sending statement to v2 peers",
 		);
 		ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(
-			vstaging_peers_to_send.iter().map(|(p, _, _)| *p).collect(),
-			compatible_v1_message(ValidationVersion::VStaging, payload.clone()).into(),
+			v2_peers_to_send.iter().map(|(p, _, _)| *p).collect(),
+			compatible_v1_message(ValidationVersion::V2, payload.clone()).into(),
 		))
 		.await;
 	}
 
 	v1_peers_to_send
 		.into_iter()
-		.chain(vstaging_peers_to_send)
+		.chain(v2_peers_to_send)
 		.filter_map(|(peer, needs_dependent, _)| if needs_dependent { Some(peer) } else { None })
 		.collect()
 }
@@ -1443,10 +1442,8 @@ async fn handle_incoming_message<'a, Context>(
 
 	let message = match message {
 		Versioned::V1(m) => m,
-		Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::V1Compatibility(
-			m,
-		)) => m,
-		Versioned::VStaging(_) => {
+		Versioned::V2(protocol_v2::StatementDistributionMessage::V1Compatibility(m)) => m,
+		Versioned::V2(_) => {
 			// The higher-level subsystem code is supposed to filter out
 			// all non v1 messages.
 			gum::debug!(
@@ -2170,8 +2167,7 @@ fn compatible_v1_message(
 ) -> net_protocol::StatementDistributionMessage {
 	match version {
 		ValidationVersion::V1 => Versioned::V1(message),
-		ValidationVersion::VStaging => Versioned::VStaging(
-			protocol_vstaging::StatementDistributionMessage::V1Compatibility(message),
-		),
+		ValidationVersion::V2 =>
+			Versioned::V2(protocol_v2::StatementDistributionMessage::V1Compatibility(message)),
 	}
 }
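The V1-compatibility path in `circulate_statement` above hinges on splitting peers by negotiated protocol version before wrapping the same payload for each group. A simplified sketch of that partition step, using a hypothetical numeric `PeerId` and omitting the peer-data and reputation bookkeeping of the real function:

// Sketch: split peers into v1 and v2 groups so the same statement payload can be
// wrapped in the matching protocol version per group. PeerId is a stand-in type.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ValidationVersion {
	V1,
	V2,
}

type PeerId = u32;

fn split_by_version(peers: Vec<(PeerId, ValidationVersion)>) -> (Vec<PeerId>, Vec<PeerId>) {
	// partition() works for two versions; with more versions a match into
	// per-version buckets would scale better, as the source comment notes.
	let (v1, v2): (Vec<_>, Vec<_>) =
		peers.into_iter().partition(|(_, v)| *v == ValidationVersion::V1);
	(v1.into_iter().map(|(p, _)| p).collect(), v2.into_iter().map(|(p, _)| p).collect())
}

fn main() {
	let peers =
		vec![(1, ValidationVersion::V1), (2, ValidationVersion::V2), (3, ValidationVersion::V2)];
	let (v1_peers, v2_peers) = split_by_version(peers);
	assert_eq!(v1_peers, vec![1]);
	assert_eq!(v2_peers, vec![2, 3]);
}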
diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs
index 17a66a9ff79..ca3038f9b3f 100644
--- a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs
+++ b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs
@@ -793,7 +793,7 @@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() {
 		assert_matches!(
 			handle.recv().await,
 			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx))
+				RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx))
 			)
 				if r == hash_a
 			=> {
@@ -1033,7 +1033,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing(
 		assert_matches!(
 			handle.recv().await,
 			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx))
+				RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx))
 			)
 				if r == hash_a
 			=> {
@@ -1563,7 +1563,7 @@ fn delay_reputation_changes() {
 		assert_matches!(
 			handle.recv().await,
 			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx))
+				RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx))
 			)
 				if r == hash_a
 			=> {
@@ -2043,7 +2043,7 @@ fn share_prioritizes_backing_group() {
 		assert_matches!(
 			handle.recv().await,
 			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx))
+				RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx))
 			)
 				if r == hash_a
 			=> {
@@ -2365,7 +2365,7 @@ fn peer_cant_flood_with_large_statements() {
 		assert_matches!(
 			handle.recv().await,
 			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx))
+				RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx))
 			)
 				if r == hash_a
 			=> {
@@ -2590,7 +2590,7 @@ fn handle_multiple_seconded_statements() {
 		assert_matches!(
 			handle.recv().await,
 			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx))
+				RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx))
 			)
 				if r == relay_parent_hash
 			=> {
diff --git a/polkadot/node/network/statement-distribution/src/lib.rs b/polkadot/node/network/statement-distribution/src/lib.rs
index b2eb9cccced..eead7df5224 100644
--- a/polkadot/node/network/statement-distribution/src/lib.rs
+++ b/polkadot/node/network/statement-distribution/src/lib.rs
@@ -26,10 +26,8 @@ use error::{log_error, FatalResult};
 use std::time::Duration;
 
 use polkadot_node_network_protocol::{
-	request_response::{
-		v1 as request_v1, vstaging::AttestedCandidateRequest, IncomingRequestReceiver,
-	},
-	vstaging as protocol_vstaging, Versioned,
+	request_response::{v1 as request_v1, v2::AttestedCandidateRequest, IncomingRequestReceiver},
+	v2 as protocol_v2, Versioned,
 };
 use polkadot_node_primitives::StatementWithPVD;
 use polkadot_node_subsystem::{
@@ -60,7 +58,7 @@ use legacy_v1::{
 	ResponderMessage as V1ResponderMessage,
 };
 
-mod vstaging;
+mod v2;
 
 const LOG_TARGET: &str = "parachain::statement-distribution";
 
@@ -104,9 +102,9 @@ enum MuxedMessage {
 	/// Messages from spawned v1 (legacy) responder background task.
 	V1Responder(Option<V1ResponderMessage>),
 	/// Messages from candidate responder background task.
-	Responder(Option<vstaging::ResponderMessage>),
+	Responder(Option<v2::ResponderMessage>),
 	/// Messages from answered requests.
-	Response(vstaging::UnhandledResponse),
+	Response(v2::UnhandledResponse),
 	/// Message that a request is ready to be retried. This just acts as a signal that we should
 	/// dispatch all pending requests again.
 	RetryRequest(()),
@@ -116,10 +114,10 @@ enum MuxedMessage {
 impl MuxedMessage {
 	async fn receive<Context>(
 		ctx: &mut Context,
-		state: &mut vstaging::State,
+		state: &mut v2::State,
 		from_v1_requester: &mut mpsc::Receiver<V1RequesterMessage>,
 		from_v1_responder: &mut mpsc::Receiver<V1ResponderMessage>,
-		from_responder: &mut mpsc::Receiver<vstaging::ResponderMessage>,
+		from_responder: &mut mpsc::Receiver<v2::ResponderMessage>,
 	) -> MuxedMessage {
 		let (request_manager, response_manager) = state.request_and_response_managers();
 		// We are only fusing here to make `select` happy, in reality we will quit if one of those
@@ -128,8 +126,8 @@ impl MuxedMessage {
 		let from_v1_requester = from_v1_requester.next();
 		let from_v1_responder = from_v1_responder.next();
 		let from_responder = from_responder.next();
-		let receive_response = vstaging::receive_response(response_manager).fuse();
-		let retry_request = vstaging::next_retry(request_manager).fuse();
+		let receive_response = v2::receive_response(response_manager).fuse();
+		let retry_request = v2::next_retry(request_manager).fuse();
 		futures::pin_mut!(
 			from_orchestra,
 			from_v1_requester,
@@ -182,7 +180,7 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> {
 		let mut reputation_delay = new_reputation_delay();
 
 		let mut legacy_v1_state = crate::legacy_v1::State::new(self.keystore.clone());
-		let mut state = crate::vstaging::State::new(self.keystore.clone());
+		let mut state = crate::v2::State::new(self.keystore.clone());
 
 		// Sender/Receiver for getting news from our statement fetching tasks.
 		let (v1_req_sender, mut v1_req_receiver) = mpsc::channel(1);
@@ -206,7 +204,7 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> {
 
 		ctx.spawn(
 			"candidate-responder",
-			vstaging::respond_task(
+			v2::respond_task(
 				self.req_receiver.take().expect("Mandatory argument to new. qed"),
 				res_sender.clone(),
 			)
@@ -280,14 +278,13 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> {
 					)?;
 				},
 				MuxedMessage::Responder(result) => {
-					vstaging::answer_request(
+					v2::answer_request(
 						&mut state,
 						result.ok_or(FatalError::RequesterReceiverFinished)?,
 					);
 				},
 				MuxedMessage::Response(result) => {
-					vstaging::handle_response(&mut ctx, &mut state, result, &mut self.reputation)
-						.await;
+					v2::handle_response(&mut ctx, &mut state, result, &mut self.reputation).await;
 				},
 				MuxedMessage::RetryRequest(()) => {
 					// A pending request is ready to retry. This is only a signal to call
@@ -296,7 +293,7 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> {
 				},
 			};
 
-			vstaging::dispatch_requests(&mut ctx, &mut state).await;
+			v2::dispatch_requests(&mut ctx, &mut state).await;
 		}
 		Ok(())
 	}
@@ -304,7 +301,7 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> {
 	async fn handle_subsystem_message<Context>(
 		&mut self,
 		ctx: &mut Context,
-		state: &mut vstaging::State,
+		state: &mut v2::State,
 		legacy_v1_state: &mut legacy_v1::State,
 		v1_req_sender: &mpsc::Sender<V1RequesterMessage>,
 		message: FromOrchestra<StatementDistributionMessage>,
@@ -318,11 +315,11 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> {
 			})) => {
 				let _timer = metrics.time_active_leaves_update();
 
-				// vstaging should handle activated first because of implicit view.
+				// v2 should handle activated first because of implicit view.
 				if let Some(ref activated) = activated {
 					let mode = prospective_parachains_mode(ctx.sender(), activated.hash).await?;
 					if let ProspectiveParachainsMode::Enabled { .. } = mode {
-						vstaging::handle_active_leaves_update(ctx, state, activated, mode).await?;
+						v2::handle_active_leaves_update(ctx, state, activated, mode).await?;
 					} else if let ProspectiveParachainsMode::Disabled = mode {
 						for deactivated in &deactivated {
 							crate::legacy_v1::handle_deactivate_leaf(legacy_v1_state, *deactivated);
@@ -339,7 +336,7 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> {
 					for deactivated in &deactivated {
 						crate::legacy_v1::handle_deactivate_leaf(legacy_v1_state, *deactivated);
 					}
-					vstaging::handle_deactivate_leaves(state, &deactivated);
+					v2::handle_deactivate_leaves(state, &deactivated);
 				}
 			},
 			FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => {
@@ -362,7 +359,7 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> {
 						)
 						.await?;
 					} else {
-						vstaging::share_local_statement(
+						v2::share_local_statement(
 							ctx,
 							state,
 							relay_parent,
@@ -399,11 +396,11 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> {
 
 					let target = match &event {
 						NetworkBridgeEvent::PeerMessage(_, message) => match message {
-							Versioned::VStaging(
-								protocol_vstaging::StatementDistributionMessage::V1Compatibility(_),
+							Versioned::V2(
+								protocol_v2::StatementDistributionMessage::V1Compatibility(_),
 							) => VersionTarget::Legacy,
 							Versioned::V1(_) => VersionTarget::Legacy,
-							Versioned::VStaging(_) => VersionTarget::Current,
+							Versioned::V2(_) => VersionTarget::Current,
 						},
 						_ => VersionTarget::Both,
 					};
@@ -422,14 +419,12 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> {
 					}
 
 					if target.targets_current() {
-						// pass to vstaging.
-						vstaging::handle_network_update(ctx, state, event, &mut self.reputation)
-							.await;
+						// pass to v2.
+						v2::handle_network_update(ctx, state, event, &mut self.reputation).await;
 					}
 				},
 				StatementDistributionMessage::Backed(candidate_hash) => {
-					crate::vstaging::handle_backed_candidate_message(ctx, state, candidate_hash)
-						.await;
+					crate::v2::handle_backed_candidate_message(ctx, state, candidate_hash).await;
 				},
 			},
 		}
diff --git a/polkadot/node/network/statement-distribution/src/vstaging/candidates.rs b/polkadot/node/network/statement-distribution/src/v2/candidates.rs
similarity index 99%
rename from polkadot/node/network/statement-distribution/src/vstaging/candidates.rs
rename to polkadot/node/network/statement-distribution/src/v2/candidates.rs
index d6b68510f1c..e660df5da17 100644
--- a/polkadot/node/network/statement-distribution/src/vstaging/candidates.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/candidates.rs
@@ -27,7 +27,7 @@
 
 use polkadot_node_network_protocol::PeerId;
 use polkadot_node_subsystem::messages::HypotheticalCandidate;
-use polkadot_primitives::vstaging::{
+use polkadot_primitives::{
 	CandidateHash, CommittedCandidateReceipt, GroupIndex, Hash, Id as ParaId,
 	PersistedValidationData,
 };
diff --git a/polkadot/node/network/statement-distribution/src/vstaging/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/cluster.rs
similarity index 99%
rename from polkadot/node/network/statement-distribution/src/vstaging/cluster.rs
rename to polkadot/node/network/statement-distribution/src/v2/cluster.rs
index 55d847f8315..8adb8353ca9 100644
--- a/polkadot/node/network/statement-distribution/src/vstaging/cluster.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/cluster.rs
@@ -55,7 +55,7 @@
 //! and to keep track of what we have sent to other validators in the group and what we may
 //! continue to send them.
 
-use polkadot_primitives::vstaging::{CandidateHash, CompactStatement, ValidatorIndex};
+use polkadot_primitives::{CandidateHash, CompactStatement, ValidatorIndex};
 
 use std::collections::{HashMap, HashSet};
 
@@ -459,7 +459,7 @@ pub enum RejectOutgoing {
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use polkadot_primitives::vstaging::Hash;
+	use polkadot_primitives::Hash;
 
 	#[test]
 	fn rejects_incoming_outside_of_group() {
diff --git a/polkadot/node/network/statement-distribution/src/vstaging/grid.rs b/polkadot/node/network/statement-distribution/src/v2/grid.rs
similarity index 99%
rename from polkadot/node/network/statement-distribution/src/vstaging/grid.rs
rename to polkadot/node/network/statement-distribution/src/v2/grid.rs
index 4fd77d0ced1..3d53ff6d321 100644
--- a/polkadot/node/network/statement-distribution/src/vstaging/grid.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/grid.rs
@@ -60,12 +60,8 @@
 //!         - which has sent a `BackedCandidateAcknowledgement`
 //!   - 1st-hop nodes do the same thing
 
-use polkadot_node_network_protocol::{
-	grid_topology::SessionGridTopology, vstaging::StatementFilter,
-};
-use polkadot_primitives::vstaging::{
-	CandidateHash, CompactStatement, GroupIndex, Hash, ValidatorIndex,
-};
+use polkadot_node_network_protocol::{grid_topology::SessionGridTopology, v2::StatementFilter};
+use polkadot_primitives::{CandidateHash, CompactStatement, GroupIndex, Hash, ValidatorIndex};
 
 use std::collections::{
 	hash_map::{Entry, HashMap},
diff --git a/polkadot/node/network/statement-distribution/src/vstaging/groups.rs b/polkadot/node/network/statement-distribution/src/v2/groups.rs
similarity index 96%
rename from polkadot/node/network/statement-distribution/src/vstaging/groups.rs
rename to polkadot/node/network/statement-distribution/src/v2/groups.rs
index b2daa1c0ac7..d917b209052 100644
--- a/polkadot/node/network/statement-distribution/src/vstaging/groups.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/groups.rs
@@ -17,8 +17,7 @@
 //! A utility for tracking groups and their members within a session.
 
 use polkadot_primitives::{
-	effective_minimum_backing_votes,
-	vstaging::{GroupIndex, IndexedVec, ValidatorIndex},
+	effective_minimum_backing_votes, GroupIndex, IndexedVec, ValidatorIndex,
 };
 
 use std::collections::HashMap;
diff --git a/polkadot/node/network/statement-distribution/src/vstaging/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs
similarity index 97%
rename from polkadot/node/network/statement-distribution/src/vstaging/mod.rs
rename to polkadot/node/network/statement-distribution/src/v2/mod.rs
index 4639720b322..e11d66c41a0 100644
--- a/polkadot/node/network/statement-distribution/src/vstaging/mod.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs
@@ -23,11 +23,11 @@ use polkadot_node_network_protocol::{
 	peer_set::ValidationVersion,
 	request_response::{
 		incoming::OutgoingResponse,
-		vstaging::{AttestedCandidateRequest, AttestedCandidateResponse},
+		v2::{AttestedCandidateRequest, AttestedCandidateResponse},
 		IncomingRequest, IncomingRequestReceiver, Requests,
 		MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS,
 	},
-	vstaging::{self as protocol_vstaging, StatementFilter},
+	v2::{self as protocol_v2, StatementFilter},
 	IfDisconnected, PeerId, UnifiedReputationChange as Rep, Versioned, View,
 };
 use polkadot_node_primitives::{
@@ -45,7 +45,7 @@ use polkadot_node_subsystem_util::{
 	reputation::ReputationAggregator,
 	runtime::{request_min_backing_votes, ProspectiveParachainsMode},
 };
-use polkadot_primitives::vstaging::{
+use polkadot_primitives::{
 	AuthorityDiscoveryId, CandidateHash, CompactStatement, CoreIndex, CoreState, GroupIndex,
 	GroupRotationInfo, Hash, Id as ParaId, IndexedVec, SessionIndex, SessionInfo, SignedStatement,
 	SigningContext, UncheckedSignedStatement, ValidatorId, ValidatorIndex,
@@ -323,7 +323,7 @@ pub(crate) async fn handle_network_update<Context>(
 		NetworkBridgeEvent::PeerConnected(peer_id, role, protocol_version, mut authority_ids) => {
 			gum::trace!(target: LOG_TARGET, ?peer_id, ?role, ?protocol_version, "Peer connected");
 
-			if protocol_version != ValidationVersion::VStaging.into() {
+			if protocol_version != ValidationVersion::V2.into() {
 				return
 			}
 
@@ -381,19 +381,19 @@ pub(crate) async fn handle_network_update<Context>(
 		},
 		NetworkBridgeEvent::PeerMessage(peer_id, message) => match message {
 			net_protocol::StatementDistributionMessage::V1(_) => return,
-			net_protocol::StatementDistributionMessage::VStaging(
-				protocol_vstaging::StatementDistributionMessage::V1Compatibility(_),
+			net_protocol::StatementDistributionMessage::V2(
+				protocol_v2::StatementDistributionMessage::V1Compatibility(_),
 			) => return,
-			net_protocol::StatementDistributionMessage::VStaging(
-				protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement),
+			net_protocol::StatementDistributionMessage::V2(
+				protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement),
 			) =>
 				handle_incoming_statement(ctx, state, peer_id, relay_parent, statement, reputation)
 					.await,
-			net_protocol::StatementDistributionMessage::VStaging(
-				protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(inner),
+			net_protocol::StatementDistributionMessage::V2(
+				protocol_v2::StatementDistributionMessage::BackedCandidateManifest(inner),
 			) => handle_incoming_manifest(ctx, state, peer_id, inner, reputation).await,
-			net_protocol::StatementDistributionMessage::VStaging(
-				protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(inner),
+			net_protocol::StatementDistributionMessage::V2(
+				protocol_v2::StatementDistributionMessage::BackedCandidateKnown(inner),
 			) => handle_incoming_acknowledgement(ctx, state, peer_id, inner, reputation).await,
 		},
 		NetworkBridgeEvent::PeerViewChange(peer_id, view) =>
@@ -727,10 +727,8 @@ fn pending_statement_network_message(
 	statement_store
 		.validator_statement(originator, compact)
 		.map(|s| s.as_unchecked().clone())
-		.map(|signed| {
-			protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, signed)
-		})
-		.map(|msg| (vec![*peer], Versioned::VStaging(msg).into()))
+		.map(|signed| protocol_v2::StatementDistributionMessage::Statement(relay_parent, signed))
+		.map(|msg| (vec![*peer], Versioned::V2(msg).into()))
 }
 
 /// Send a peer all pending cluster statements for a relay parent.
@@ -823,7 +821,7 @@ async fn send_pending_grid_messages<Context>(
 
 		match kind {
 			grid::ManifestKind::Full => {
-				let manifest = protocol_vstaging::BackedCandidateManifest {
+				let manifest = protocol_v2::BackedCandidateManifest {
 					relay_parent,
 					candidate_hash,
 					group_index,
@@ -847,8 +845,8 @@ async fn send_pending_grid_messages<Context>(
 
 				messages.push((
 					vec![*peer_id],
-					Versioned::VStaging(
-						protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(
+					Versioned::V2(
+						protocol_v2::StatementDistributionMessage::BackedCandidateManifest(
 							manifest,
 						),
 					)
@@ -1192,7 +1190,7 @@ async fn circulate_statement<Context>(
 
 		ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(
 			statement_to,
-			Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::Statement(
+			Versioned::V2(protocol_v2::StatementDistributionMessage::Statement(
 				relay_parent,
 				statement.as_unchecked().clone(),
 			))
@@ -1672,7 +1670,7 @@ async fn provide_candidate_to_grid<Context>(
 		filter.clone(),
 	);
 
-	let manifest = protocol_vstaging::BackedCandidateManifest {
+	let manifest = protocol_v2::BackedCandidateManifest {
 		relay_parent,
 		candidate_hash,
 		group_index,
@@ -1680,16 +1678,15 @@ async fn provide_candidate_to_grid<Context>(
 		parent_head_data_hash: confirmed_candidate.parent_head_data_hash(),
 		statement_knowledge: filter.clone(),
 	};
-	let acknowledgement = protocol_vstaging::BackedCandidateAcknowledgement {
+	let acknowledgement = protocol_v2::BackedCandidateAcknowledgement {
 		candidate_hash,
 		statement_knowledge: filter.clone(),
 	};
 
-	let manifest_message = Versioned::VStaging(
-		protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest),
-	);
-	let ack_message = Versioned::VStaging(
-		protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(acknowledgement),
+	let manifest_message =
+		Versioned::V2(protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest));
+	let ack_message = Versioned::V2(
+		protocol_v2::StatementDistributionMessage::BackedCandidateKnown(acknowledgement),
 	);
 
 	let mut manifest_peers = Vec::new();
@@ -2062,8 +2059,8 @@ fn post_acknowledgement_statement_messages(
 			statement.payload(),
 		);
 
-		messages.push(Versioned::VStaging(
-			protocol_vstaging::StatementDistributionMessage::Statement(
+		messages.push(Versioned::V2(
+			protocol_v2::StatementDistributionMessage::Statement(
 				relay_parent,
 				statement.as_unchecked().clone(),
 			)
@@ -2079,7 +2076,7 @@ async fn handle_incoming_manifest<Context>(
 	ctx: &mut Context,
 	state: &mut State,
 	peer: PeerId,
-	manifest: net_protocol::vstaging::BackedCandidateManifest,
+	manifest: net_protocol::v2::BackedCandidateManifest,
 	reputation: &mut ReputationAggregator,
 ) {
 	gum::debug!(
@@ -2183,14 +2180,14 @@ fn acknowledgement_and_statement_messages(
 		Some(l) => l,
 	};
 
-	let acknowledgement = protocol_vstaging::BackedCandidateAcknowledgement {
+	let acknowledgement = protocol_v2::BackedCandidateAcknowledgement {
 		candidate_hash,
 		statement_knowledge: local_knowledge.clone(),
 	};
 
-	let msg = Versioned::VStaging(
-		protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(acknowledgement),
-	);
+	let msg = Versioned::V2(protocol_v2::StatementDistributionMessage::BackedCandidateKnown(
+		acknowledgement,
+	));
 
 	let mut messages = vec![(vec![peer], msg.into())];
 
@@ -2221,7 +2218,7 @@ async fn handle_incoming_acknowledgement<Context>(
 	ctx: &mut Context,
 	state: &mut State,
 	peer: PeerId,
-	acknowledgement: net_protocol::vstaging::BackedCandidateAcknowledgement,
+	acknowledgement: net_protocol::v2::BackedCandidateAcknowledgement,
 	reputation: &mut ReputationAggregator,
 ) {
 	// The key difference between acknowledgments and full manifests is that only
@@ -2521,7 +2518,7 @@ pub(crate) async fn dispatch_requests<Context>(ctx: &mut Context, state: &mut St
 	) {
 		// Peer is supposedly connected.
 		ctx.send_message(NetworkBridgeTxMessage::SendRequests(
-			vec![Requests::AttestedCandidateVStaging(request)],
+			vec![Requests::AttestedCandidateV2(request)],
 			IfDisconnected::ImmediateError,
 		))
 		.await;
diff --git a/polkadot/node/network/statement-distribution/src/vstaging/requests.rs b/polkadot/node/network/statement-distribution/src/v2/requests.rs
similarity index 99%
rename from polkadot/node/network/statement-distribution/src/vstaging/requests.rs
rename to polkadot/node/network/statement-distribution/src/v2/requests.rs
index 79925f2115d..f13496024fc 100644
--- a/polkadot/node/network/statement-distribution/src/vstaging/requests.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/requests.rs
@@ -39,14 +39,14 @@ use crate::LOG_TARGET;
 use polkadot_node_network_protocol::{
 	request_response::{
 		outgoing::{Recipient as RequestRecipient, RequestError},
-		vstaging::{AttestedCandidateRequest, AttestedCandidateResponse},
+		v2::{AttestedCandidateRequest, AttestedCandidateResponse},
 		OutgoingRequest, OutgoingResult, MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS,
 	},
-	vstaging::StatementFilter,
+	v2::StatementFilter,
 	PeerId, UnifiedReputationChange as Rep,
 };
-use polkadot_primitives::vstaging::{
-	CandidateHash, CommittedCandidateReceipt, CompactStatement, GroupIndex, Hash, ParaId,
+use polkadot_primitives::{
+	CandidateHash, CommittedCandidateReceipt, CompactStatement, GroupIndex, Hash, Id as ParaId,
 	PersistedValidationData, SessionIndex, SignedStatement, SigningContext, ValidatorId,
 	ValidatorIndex,
 };
diff --git a/polkadot/node/network/statement-distribution/src/vstaging/statement_store.rs b/polkadot/node/network/statement-distribution/src/v2/statement_store.rs
similarity index 98%
rename from polkadot/node/network/statement-distribution/src/vstaging/statement_store.rs
rename to polkadot/node/network/statement-distribution/src/v2/statement_store.rs
index 9ea926f24aa..74db431eda1 100644
--- a/polkadot/node/network/statement-distribution/src/vstaging/statement_store.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/statement_store.rs
@@ -24,8 +24,8 @@
 //! groups, and views based on the validators themselves.
 
 use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec};
-use polkadot_node_network_protocol::vstaging::StatementFilter;
-use polkadot_primitives::vstaging::{
+use polkadot_node_network_protocol::v2::StatementFilter;
+use polkadot_primitives::{
 	CandidateHash, CompactStatement, GroupIndex, SignedStatement, ValidatorIndex,
 };
 use std::collections::hash_map::{Entry as HEntry, HashMap};
diff --git a/polkadot/node/network/statement-distribution/src/vstaging/tests/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs
similarity index 95%
rename from polkadot/node/network/statement-distribution/src/vstaging/tests/cluster.rs
rename to polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs
index 50d0477eb51..80dec1d75ab 100644
--- a/polkadot/node/network/statement-distribution/src/vstaging/tests/cluster.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs
@@ -103,8 +103,8 @@ fn share_seconded_circulated_to_cluster() {
 			overseer.recv().await,
 			AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(
 				peers,
-				Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution(
-					protocol_vstaging::StatementDistributionMessage::Statement(
+				Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution(
+					protocol_v2::StatementDistributionMessage::Statement(
 						r,
 						s,
 					)
@@ -173,7 +173,7 @@ fn cluster_valid_statement_before_seconded_ignored() {
 		send_peer_message(
 			&mut overseer,
 			peer_a.clone(),
-			protocol_vstaging::StatementDistributionMessage::Statement(
+			protocol_v2::StatementDistributionMessage::Statement(
 				relay_parent,
 				signed_valid.as_unchecked().clone(),
 			),
@@ -252,7 +252,7 @@ fn cluster_statement_bad_signature() {
 			send_peer_message(
 				&mut overseer,
 				peer_a.clone(),
-				protocol_vstaging::StatementDistributionMessage::Statement(
+				protocol_v2::StatementDistributionMessage::Statement(
 					relay_parent,
 					statement.clone(),
 				),
@@ -327,7 +327,7 @@ fn useful_cluster_statement_from_non_cluster_peer_rejected() {
 		send_peer_message(
 			&mut overseer,
 			peer_a.clone(),
-			protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement),
+			protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement),
 		)
 		.await;
 
@@ -388,7 +388,7 @@ fn statement_from_non_cluster_originator_unexpected() {
 		send_peer_message(
 			&mut overseer,
 			peer_a.clone(),
-			protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement),
+			protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement),
 		)
 		.await;
 
@@ -465,7 +465,7 @@ fn seconded_statement_leads_to_request() {
 		send_peer_message(
 			&mut overseer,
 			peer_a.clone(),
-			protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement),
+			protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement),
 		)
 		.await;
 
@@ -593,8 +593,8 @@ fn cluster_statements_shared_seconded_first() {
 
 				assert_matches!(
 					&messages[0].1,
-					Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution(
-						protocol_vstaging::StatementDistributionMessage::Statement(
+					Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution(
+						protocol_v2::StatementDistributionMessage::Statement(
 							r,
 							s,
 						)
@@ -604,8 +604,8 @@ fn cluster_statements_shared_seconded_first() {
 
 				assert_matches!(
 					&messages[1].1,
-					Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution(
-						protocol_vstaging::StatementDistributionMessage::Statement(
+					Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution(
+						protocol_v2::StatementDistributionMessage::Statement(
 							r,
 							s,
 						)
@@ -699,8 +699,8 @@ fn cluster_accounts_for_implicit_view() {
 			overseer.recv().await,
 			AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(
 				peers,
-				Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution(
-					protocol_vstaging::StatementDistributionMessage::Statement(
+				Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution(
+					protocol_v2::StatementDistributionMessage::Statement(
 						r,
 						s,
 					)
@@ -749,8 +749,8 @@ fn cluster_accounts_for_implicit_view() {
 					&messages[0],
 					(
 						peers,
-						Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution(
-							protocol_vstaging::StatementDistributionMessage::Statement(
+						Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution(
+							protocol_v2::StatementDistributionMessage::Statement(
 								r,
 								s,
 							)
@@ -836,10 +836,7 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() {
 			send_peer_message(
 				&mut overseer,
 				peer_a.clone(),
-				protocol_vstaging::StatementDistributionMessage::Statement(
-					relay_parent,
-					a_seconded,
-				),
+				protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded),
 			)
 			.await;
 
@@ -971,10 +968,7 @@ fn cluster_messages_imported_after_new_leaf_importable_check() {
 			send_peer_message(
 				&mut overseer,
 				peer_a.clone(),
-				protocol_vstaging::StatementDistributionMessage::Statement(
-					relay_parent,
-					a_seconded,
-				),
+				protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded),
 			)
 			.await;
 
@@ -1191,7 +1185,7 @@ fn ensure_seconding_limit_is_respected() {
 			send_peer_message(
 				&mut overseer,
 				peer_a.clone(),
-				protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement),
+				protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement),
 			)
 			.await;
 
@@ -1216,7 +1210,7 @@ fn ensure_seconding_limit_is_respected() {
 			send_peer_message(
 				&mut overseer,
 				peer_a.clone(),
-				protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement),
+				protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement),
 			)
 			.await;
 
@@ -1241,7 +1235,7 @@ fn ensure_seconding_limit_is_respected() {
 			send_peer_message(
 				&mut overseer,
 				peer_a.clone(),
-				protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement),
+				protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement),
 			)
 			.await;
 
diff --git a/polkadot/node/network/statement-distribution/src/vstaging/tests/grid.rs b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs
similarity index 95%
rename from polkadot/node/network/statement-distribution/src/vstaging/tests/grid.rs
rename to polkadot/node/network/statement-distribution/src/v2/tests/grid.rs
index 0739f301943..a0af9579823 100644
--- a/polkadot/node/network/statement-distribution/src/vstaging/tests/grid.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs
@@ -17,9 +17,7 @@
 use super::*;
 
 use bitvec::order::Lsb0;
-use polkadot_node_network_protocol::vstaging::{
-	BackedCandidateAcknowledgement, BackedCandidateManifest,
-};
+use polkadot_node_network_protocol::v2::{BackedCandidateAcknowledgement, BackedCandidateManifest};
 use polkadot_node_subsystem::messages::CandidateBackingMessage;
 use polkadot_primitives_test_helpers::make_candidate;
 
@@ -156,7 +154,7 @@ fn backed_candidate_leads_to_advertisement() {
 			send_peer_message(
 				&mut overseer,
 				peer_a.clone(),
-				protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement),
+				protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement),
 			)
 			.await;
 
@@ -181,7 +179,7 @@ fn backed_candidate_leads_to_advertisement() {
 			send_peer_message(
 				&mut overseer,
 				peer_b.clone(),
-				protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement),
+				protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement),
 			)
 			.await;
 
@@ -210,9 +208,9 @@ fn backed_candidate_leads_to_advertisement() {
 				AllMessages:: NetworkBridgeTx(
 					NetworkBridgeTxMessage::SendValidationMessage(
 						peers,
-						Versioned::VStaging(
-							protocol_vstaging::ValidationProtocol::StatementDistribution(
-								protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest),
+						Versioned::V2(
+							protocol_v2::ValidationProtocol::StatementDistribution(
+								protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest),
 							),
 						),
 					)
@@ -349,7 +347,7 @@ fn received_advertisement_before_confirmation_leads_to_request() {
 			send_peer_message(
 				&mut overseer,
 				peer_c.clone(),
-				protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest),
+				protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest),
 			)
 			.await;
 
@@ -534,7 +532,7 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() {
 			send_peer_message(
 				&mut overseer,
 				peer_c.clone(),
-				protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(
+				protocol_v2::StatementDistributionMessage::BackedCandidateManifest(
 					manifest.clone(),
 				),
 			)
@@ -603,9 +601,9 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() {
 				AllMessages:: NetworkBridgeTx(
 					NetworkBridgeTxMessage::SendValidationMessage(
 						peers,
-						Versioned::VStaging(
-							protocol_vstaging::ValidationProtocol::StatementDistribution(
-								protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(ack),
+						Versioned::V2(
+							protocol_v2::ValidationProtocol::StatementDistribution(
+								protocol_v2::StatementDistributionMessage::BackedCandidateKnown(ack),
 							),
 						),
 					)
@@ -629,7 +627,7 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() {
 			send_peer_message(
 				&mut overseer,
 				peer_d.clone(),
-				protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(
+				protocol_v2::StatementDistributionMessage::BackedCandidateManifest(
 					manifest.clone(),
 				),
 			)
@@ -654,8 +652,8 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() {
 
 					assert_matches!(
 						&messages[0].1,
-						Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution(
-							protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(ack)
+						Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution(
+							protocol_v2::StatementDistributionMessage::BackedCandidateKnown(ack)
 						)) if *ack == expected_ack
 					);
 				}
@@ -782,7 +780,7 @@ fn received_advertisement_after_confirmation_before_backing() {
 			send_peer_message(
 				&mut overseer,
 				peer_c.clone(),
-				protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(
+				protocol_v2::StatementDistributionMessage::BackedCandidateManifest(
 					manifest.clone(),
 				),
 			)
@@ -842,7 +840,7 @@ fn received_advertisement_after_confirmation_before_backing() {
 			send_peer_message(
 				&mut overseer,
 				peer_d.clone(),
-				protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(
+				protocol_v2::StatementDistributionMessage::BackedCandidateManifest(
 					manifest.clone(),
 				),
 			)
@@ -951,7 +949,7 @@ fn additional_statements_are_shared_after_manifest_exchange() {
 			send_peer_message(
 				&mut overseer,
 				peer_c.clone(),
-				protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(
+				protocol_v2::StatementDistributionMessage::BackedCandidateManifest(
 					manifest.clone(),
 				),
 			)
@@ -1066,9 +1064,9 @@ fn additional_statements_are_shared_after_manifest_exchange() {
 				AllMessages:: NetworkBridgeTx(
 					NetworkBridgeTxMessage::SendValidationMessage(
 						peers,
-						Versioned::VStaging(
-							protocol_vstaging::ValidationProtocol::StatementDistribution(
-								protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(ack),
+						Versioned::V2(
+							protocol_v2::ValidationProtocol::StatementDistribution(
+								protocol_v2::StatementDistributionMessage::BackedCandidateKnown(ack),
 							),
 						),
 					)
@@ -1104,7 +1102,7 @@ fn additional_statements_are_shared_after_manifest_exchange() {
 			send_peer_message(
 				&mut overseer,
 				peer_d.clone(),
-				protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(
+				protocol_v2::StatementDistributionMessage::BackedCandidateManifest(
 					manifest.clone(),
 				),
 			)
@@ -1130,15 +1128,15 @@ fn additional_statements_are_shared_after_manifest_exchange() {
 
 					assert_matches!(
 						&messages[0].1,
-						Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution(
-							protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(ack)
+						Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution(
+							protocol_v2::StatementDistributionMessage::BackedCandidateKnown(ack)
 						)) if *ack == expected_ack
 					);
 
 					assert_matches!(
 						&messages[1].1,
-						Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution(
-							protocol_vstaging::StatementDistributionMessage::Statement(r, s)
+						Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution(
+							protocol_v2::StatementDistributionMessage::Statement(r, s)
 						)) if *r == relay_parent && s.unchecked_payload() == &CompactStatement::Seconded(candidate_hash) && s.unchecked_validator_index() == v_e
 					);
 				}
@@ -1281,7 +1279,7 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() {
 			send_peer_message(
 				&mut overseer,
 				peer_a.clone(),
-				protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement),
+				protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement),
 			)
 			.await;
 
@@ -1306,7 +1304,7 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() {
 			send_peer_message(
 				&mut overseer,
 				peer_b.clone(),
-				protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement),
+				protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement),
 			)
 			.await;
 
@@ -1357,8 +1355,8 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() {
 
 					assert_matches!(
 						&messages[0].1,
-						Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution(
-							protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest)
+						Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution(
+							protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest)
 						)) => {
 							assert_eq!(*manifest, expected_manifest);
 						}
@@ -1504,7 +1502,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() {
 			send_peer_message(
 				&mut overseer,
 				peer_a.clone(),
-				protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement),
+				protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement),
 			)
 			.await;
 
@@ -1529,7 +1527,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() {
 			send_peer_message(
 				&mut overseer,
 				peer_b.clone(),
-				protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement),
+				protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement),
 			)
 			.await;
 
@@ -1558,9 +1556,9 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() {
 				AllMessages:: NetworkBridgeTx(
 					NetworkBridgeTxMessage::SendValidationMessage(
 						peers,
-						Versioned::VStaging(
-							protocol_vstaging::ValidationProtocol::StatementDistribution(
-								protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest),
+						Versioned::V2(
+							protocol_v2::ValidationProtocol::StatementDistribution(
+								protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest),
 							),
 						),
 					)
@@ -1692,7 +1690,7 @@ fn grid_statements_imported_to_backing() {
 			send_peer_message(
 				&mut overseer,
 				peer_c.clone(),
-				protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(
+				protocol_v2::StatementDistributionMessage::BackedCandidateManifest(
 					manifest.clone(),
 				),
 			)
@@ -1907,7 +1905,7 @@ fn advertisements_rejected_from_incorrect_peers() {
 			send_peer_message(
 				&mut overseer,
 				peer_a.clone(),
-				protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(
+				protocol_v2::StatementDistributionMessage::BackedCandidateManifest(
 					manifest.clone(),
 				),
 			)
@@ -1925,7 +1923,7 @@ fn advertisements_rejected_from_incorrect_peers() {
 			send_peer_message(
 				&mut overseer,
 				peer_b.clone(),
-				protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest),
+				protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest),
 			)
 			.await;
 
@@ -2029,7 +2027,7 @@ fn manifest_rejected_with_unknown_relay_parent() {
 			send_peer_message(
 				&mut overseer,
 				peer_c.clone(),
-				protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(
+				protocol_v2::StatementDistributionMessage::BackedCandidateManifest(
 					manifest.clone(),
 				),
 			)
@@ -2131,7 +2129,7 @@ fn manifest_rejected_when_not_a_validator() {
 			send_peer_message(
 				&mut overseer,
 				peer_c.clone(),
-				protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(
+				protocol_v2::StatementDistributionMessage::BackedCandidateManifest(
 					manifest.clone(),
 				),
 			)
@@ -2238,7 +2236,7 @@ fn manifest_rejected_when_group_does_not_match_para() {
 			send_peer_message(
 				&mut overseer,
 				peer_c.clone(),
-				protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(
+				protocol_v2::StatementDistributionMessage::BackedCandidateManifest(
 					manifest.clone(),
 				),
 			)
@@ -2370,7 +2368,7 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() {
 			send_peer_message(
 				&mut overseer,
 				peer_c.clone(),
-				protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(
+				protocol_v2::StatementDistributionMessage::BackedCandidateManifest(
 					manifest.clone(),
 				),
 			)
@@ -2439,7 +2437,7 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() {
 			send_peer_message(
 				&mut overseer,
 				peer_c.clone(),
-				protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest),
+				protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest),
 			)
 			.await;
 
diff --git a/polkadot/node/network/statement-distribution/src/vstaging/tests/mod.rs b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs
similarity index 98%
rename from polkadot/node/network/statement-distribution/src/vstaging/tests/mod.rs
rename to polkadot/node/network/statement-distribution/src/v2/tests/mod.rs
index 48ceebb1949..4150377a0c6 100644
--- a/polkadot/node/network/statement-distribution/src/vstaging/tests/mod.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs
@@ -31,7 +31,7 @@ use polkadot_node_subsystem::messages::{
 };
 use polkadot_node_subsystem_test_helpers as test_helpers;
 use polkadot_node_subsystem_util::TimeoutExt;
-use polkadot_primitives::vstaging::{
+use polkadot_primitives::{
 	AssignmentPair, AsyncBackingParams, BlockNumber, CommittedCandidateReceipt, CoreState,
 	GroupRotationInfo, HeadData, Header, IndexedVec, PersistedValidationData, ScheduledCore,
 	SessionIndex, SessionInfo, ValidatorPair,
@@ -380,7 +380,7 @@ async fn handle_leaf_activation(
 	assert_matches!(
 		virtual_overseer.recv().await,
 		AllMessages::RuntimeApi(
-			RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx))
+			RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx))
 		) if parent == *hash => {
 			tx.send(Ok(test_state.config.async_backing_params.unwrap_or(DEFAULT_ASYNC_BACKING_PARAMETERS))).unwrap();
 		}
@@ -479,7 +479,7 @@ async fn handle_sent_request(
 			assert_eq!(requests.len(), 1);
 			assert_matches!(
 				requests.pop().unwrap(),
-				Requests::AttestedCandidateVStaging(outgoing) => {
+				Requests::AttestedCandidateV2(outgoing) => {
 					assert_eq!(outgoing.peer, Recipient::Peer(peer));
 					assert_eq!(outgoing.payload.candidate_hash, candidate_hash);
 					assert_eq!(outgoing.payload.mask, mask);
@@ -537,7 +537,7 @@ async fn connect_peer(
 				NetworkBridgeEvent::PeerConnected(
 					peer,
 					ObservedRole::Authority,
-					ValidationVersion::VStaging.into(),
+					ValidationVersion::V2.into(),
 					authority_ids,
 				),
 			),
@@ -570,12 +570,12 @@ async fn send_peer_view_change(virtual_overseer: &mut VirtualOverseer, peer: Pee
 async fn send_peer_message(
 	virtual_overseer: &mut VirtualOverseer,
 	peer: PeerId,
-	message: protocol_vstaging::StatementDistributionMessage,
+	message: protocol_v2::StatementDistributionMessage,
 ) {
 	virtual_overseer
 		.send(FromOrchestra::Communication {
 			msg: StatementDistributionMessage::NetworkBridgeUpdate(
-				NetworkBridgeEvent::PeerMessage(peer, Versioned::VStaging(message)),
+				NetworkBridgeEvent::PeerMessage(peer, Versioned::V2(message)),
 			),
 		})
 		.await;
diff --git a/polkadot/node/network/statement-distribution/src/vstaging/tests/requests.rs b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs
similarity index 95%
rename from polkadot/node/network/statement-distribution/src/vstaging/tests/requests.rs
rename to polkadot/node/network/statement-distribution/src/v2/tests/requests.rs
index 5eef5809b4d..0734b75c971 100644
--- a/polkadot/node/network/statement-distribution/src/vstaging/tests/requests.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs
@@ -19,7 +19,7 @@ use super::*;
 use bitvec::order::Lsb0;
 use parity_scale_codec::{Decode, Encode};
 use polkadot_node_network_protocol::{
-	request_response::vstaging as request_vstaging, vstaging::BackedCandidateManifest,
+	request_response::v2 as request_v2, v2::BackedCandidateManifest,
 };
 use polkadot_primitives_test_helpers::make_candidate;
 use sc_network::config::{
@@ -109,10 +109,7 @@ fn cluster_peer_allowed_to_send_incomplete_statements() {
 			send_peer_message(
 				&mut overseer,
 				peer_a.clone(),
-				protocol_vstaging::StatementDistributionMessage::Statement(
-					relay_parent,
-					a_seconded,
-				),
+				protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded),
 			)
 			.await;
 
@@ -164,9 +161,9 @@ fn cluster_peer_allowed_to_send_incomplete_statements() {
 				AllMessages:: NetworkBridgeTx(
 					NetworkBridgeTxMessage::SendValidationMessage(
 						peers,
-						Versioned::VStaging(
-							protocol_vstaging::ValidationProtocol::StatementDistribution(
-								protocol_vstaging::StatementDistributionMessage::Statement(hash, statement),
+						Versioned::V2(
+							protocol_v2::ValidationProtocol::StatementDistribution(
+								protocol_v2::StatementDistributionMessage::Statement(hash, statement),
 							),
 						),
 					)
@@ -304,7 +301,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() {
 			send_peer_message(
 				&mut overseer,
 				peer_c.clone(),
-				protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(
+				protocol_v2::StatementDistributionMessage::BackedCandidateManifest(
 					manifest.clone(),
 				),
 			)
@@ -376,7 +373,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() {
 			send_peer_message(
 				&mut overseer,
 				peer_c.clone(),
-				protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(
+				protocol_v2::StatementDistributionMessage::BackedCandidateManifest(
 					manifest.clone(),
 				),
 			)
@@ -453,7 +450,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() {
 			send_peer_message(
 				&mut overseer,
 				peer_c.clone(),
-				protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(
+				protocol_v2::StatementDistributionMessage::BackedCandidateManifest(
 					manifest.clone(),
 				),
 			)
@@ -568,9 +565,7 @@ fn peer_reported_for_not_enough_statements() {
 		send_peer_message(
 			&mut overseer,
 			peer_c.clone(),
-			protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(
-				manifest.clone(),
-			),
+			protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest.clone()),
 		)
 		.await;
 
@@ -752,10 +747,7 @@ fn peer_reported_for_duplicate_statements() {
 			send_peer_message(
 				&mut overseer,
 				peer_a.clone(),
-				protocol_vstaging::StatementDistributionMessage::Statement(
-					relay_parent,
-					a_seconded,
-				),
+				protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded),
 			)
 			.await;
 
@@ -812,9 +804,9 @@ fn peer_reported_for_duplicate_statements() {
 				AllMessages:: NetworkBridgeTx(
 					NetworkBridgeTxMessage::SendValidationMessage(
 						peers,
-						Versioned::VStaging(
-							protocol_vstaging::ValidationProtocol::StatementDistribution(
-								protocol_vstaging::StatementDistributionMessage::Statement(hash, statement),
+						Versioned::V2(
+							protocol_v2::ValidationProtocol::StatementDistribution(
+								protocol_v2::StatementDistributionMessage::Statement(hash, statement),
 							),
 						),
 					)
@@ -916,10 +908,7 @@ fn peer_reported_for_providing_statements_with_invalid_signatures() {
 			send_peer_message(
 				&mut overseer,
 				peer_a.clone(),
-				protocol_vstaging::StatementDistributionMessage::Statement(
-					relay_parent,
-					a_seconded,
-				),
+				protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded),
 			)
 			.await;
 
@@ -1058,10 +1047,7 @@ fn peer_reported_for_providing_statements_with_wrong_validator_id() {
 			send_peer_message(
 				&mut overseer,
 				peer_a.clone(),
-				protocol_vstaging::StatementDistributionMessage::Statement(
-					relay_parent,
-					a_seconded,
-				),
+				protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded),
 			)
 			.await;
 
@@ -1191,7 +1177,7 @@ fn local_node_sanity_checks_incoming_requests() {
 				.send(RawIncomingRequest {
 					// Request from peer that received manifest.
 					peer: peer_c,
-					payload: request_vstaging::AttestedCandidateRequest {
+					payload: request_v2::AttestedCandidateRequest {
 						candidate_hash: candidate.hash(),
 						mask: mask.clone(),
 					}
@@ -1225,8 +1211,8 @@ fn local_node_sanity_checks_incoming_requests() {
 				overseer.recv().await,
 				AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(
 					peers,
-					Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution(
-						protocol_vstaging::StatementDistributionMessage::Statement(
+					Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution(
+						protocol_v2::StatementDistributionMessage::Statement(
 							r,
 							s,
 						)
@@ -1250,7 +1236,7 @@ fn local_node_sanity_checks_incoming_requests() {
 				.send(RawIncomingRequest {
 					// Request from peer that received manifest.
 					peer: peer_d,
-					payload: request_vstaging::AttestedCandidateRequest {
+					payload: request_v2::AttestedCandidateRequest {
 						candidate_hash: candidate.hash(),
 						mask: mask.clone(),
 					}
@@ -1269,10 +1255,7 @@ fn local_node_sanity_checks_incoming_requests() {
 			let response = state
 				.send_request(
 					peer_c,
-					request_vstaging::AttestedCandidateRequest {
-						candidate_hash: candidate.hash(),
-						mask,
-					},
+					request_v2::AttestedCandidateRequest { candidate_hash: candidate.hash(), mask },
 				)
 				.await
 				.await;
@@ -1296,7 +1279,7 @@ fn local_node_sanity_checks_incoming_requests() {
 			let response = state
 				.send_request(
 					peer_c,
-					request_vstaging::AttestedCandidateRequest {
+					request_v2::AttestedCandidateRequest {
 						candidate_hash: candidate.hash(),
 						mask: mask.clone(),
 					},
@@ -1455,7 +1438,7 @@ fn local_node_respects_statement_mask() {
 			send_peer_message(
 				&mut overseer,
 				peer_a.clone(),
-				protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement),
+				protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement),
 			)
 			.await;
 
@@ -1479,7 +1462,7 @@ fn local_node_respects_statement_mask() {
 			send_peer_message(
 				&mut overseer,
 				peer_b.clone(),
-				protocol_vstaging::StatementDistributionMessage::Statement(
+				protocol_v2::StatementDistributionMessage::Statement(
 					relay_parent,
 					statement_b.clone(),
 				),
@@ -1511,9 +1494,9 @@ fn local_node_respects_statement_mask() {
 				AllMessages:: NetworkBridgeTx(
 					NetworkBridgeTxMessage::SendValidationMessage(
 						peers,
-						Versioned::VStaging(
-							protocol_vstaging::ValidationProtocol::StatementDistribution(
-								protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest),
+						Versioned::V2(
+							protocol_v2::ValidationProtocol::StatementDistribution(
+								protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest),
 							),
 						),
 					)
@@ -1547,19 +1530,16 @@ fn local_node_respects_statement_mask() {
 			let response = state
 				.send_request(
 					peer_c,
-					request_vstaging::AttestedCandidateRequest {
-						candidate_hash: candidate.hash(),
-						mask,
-					},
+					request_v2::AttestedCandidateRequest { candidate_hash: candidate.hash(), mask },
 				)
 				.await
 				.await;
 
 			let expected_statements = vec![statement_b];
 			assert_matches!(response, full_response => {
-				// Response is the same for vstaging.
-				let request_vstaging::AttestedCandidateResponse { candidate_receipt, persisted_validation_data, statements } =
-					request_vstaging::AttestedCandidateResponse::decode(
+				// Response is the same for v2.
+				let request_v2::AttestedCandidateResponse { candidate_receipt, persisted_validation_data, statements } =
+					request_v2::AttestedCandidateResponse::decode(
 						&mut full_response.result.expect("We should have a proper answer").as_ref(),
 					).expect("Decoding should work");
 				assert_eq!(candidate_receipt, candidate);
@@ -1683,7 +1663,7 @@ fn should_delay_before_retrying_dropped_requests() {
 			send_peer_message(
 				&mut overseer,
 				peer_c.clone(),
-				protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(
+				protocol_v2::StatementDistributionMessage::BackedCandidateManifest(
 					manifest.clone(),
 				),
 			)
@@ -1696,7 +1676,7 @@ fn should_delay_before_retrying_dropped_requests() {
 					assert_eq!(requests.len(), 1);
 					assert_matches!(
 						requests.pop().unwrap(),
-						Requests::AttestedCandidateVStaging(outgoing) => {
+						Requests::AttestedCandidateV2(outgoing) => {
 							assert_eq!(outgoing.peer, Recipient::Peer(peer_c));
 							assert_eq!(outgoing.payload.candidate_hash, candidate_hash_1);
 							assert_eq!(outgoing.payload.mask, mask);
@@ -1729,7 +1709,7 @@ fn should_delay_before_retrying_dropped_requests() {
 			send_peer_message(
 				&mut overseer,
 				peer_c.clone(),
-				protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(
+				protocol_v2::StatementDistributionMessage::BackedCandidateManifest(
 					manifest.clone(),
 				),
 			)
diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml
index ee092e27733..ba5976fdcee 100644
--- a/polkadot/node/service/Cargo.toml
+++ b/polkadot/node/service/Cargo.toml
@@ -223,7 +223,3 @@ runtime-metrics = [
 	"rococo-runtime?/runtime-metrics",
 	"westend-runtime?/runtime-metrics",
 ]
-
-network-protocol-staging = [
-	"polkadot-node-network-protocol/network-protocol-staging",
-]
diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs
index 5286631fbbb..5991744dc3a 100644
--- a/polkadot/node/service/src/lib.rs
+++ b/polkadot/node/service/src/lib.rs
@@ -854,7 +854,7 @@ pub fn new_full<OverseerGenerator: OverseerGen>(
 	let (collation_req_v1_receiver, cfg) =
 		IncomingRequest::get_config_receiver(&req_protocol_names);
 	net_config.add_request_response_protocol(cfg);
-	let (collation_req_vstaging_receiver, cfg) =
+	let (collation_req_v2_receiver, cfg) =
 		IncomingRequest::get_config_receiver(&req_protocol_names);
 	net_config.add_request_response_protocol(cfg);
 	let (available_data_req_receiver, cfg) =
@@ -862,7 +862,7 @@ pub fn new_full<OverseerGenerator: OverseerGen>(
 	net_config.add_request_response_protocol(cfg);
 	let (statement_req_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names);
 	net_config.add_request_response_protocol(cfg);
-	let (candidate_req_vstaging_receiver, cfg) =
+	let (candidate_req_v2_receiver, cfg) =
 		IncomingRequest::get_config_receiver(&req_protocol_names);
 	net_config.add_request_response_protocol(cfg);
 	let (dispute_req_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names);
@@ -1051,10 +1051,10 @@ pub fn new_full<OverseerGenerator: OverseerGen>(
 					pov_req_receiver,
 					chunk_req_receiver,
 					collation_req_v1_receiver,
-					collation_req_vstaging_receiver,
+					collation_req_v2_receiver,
 					available_data_req_receiver,
 					statement_req_receiver,
-					candidate_req_vstaging_receiver,
+					candidate_req_v2_receiver,
 					dispute_req_receiver,
 					registry: prometheus_registry.as_ref(),
 					spawner,
diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs
index 33127b638e5..7d1add11824 100644
--- a/polkadot/node/service/src/overseer.rs
+++ b/polkadot/node/service/src/overseer.rs
@@ -28,7 +28,7 @@ use polkadot_node_core_dispute_coordinator::Config as DisputeCoordinatorConfig;
 use polkadot_node_network_protocol::{
 	peer_set::PeerSetProtocolNames,
 	request_response::{
-		v1 as request_v1, vstaging as request_vstaging, IncomingRequestReceiver, ReqProtocolNames,
+		v1 as request_v1, v2 as request_v2, IncomingRequestReceiver, ReqProtocolNames,
 	},
 };
 #[cfg(any(feature = "malus", test))]
@@ -104,17 +104,15 @@ where
 	pub chunk_req_receiver: IncomingRequestReceiver<request_v1::ChunkFetchingRequest>,
 	/// Collations request receiver for network protocol v1.
 	pub collation_req_v1_receiver: IncomingRequestReceiver<request_v1::CollationFetchingRequest>,
-	/// Collations request receiver for network protocol vstaging.
-	pub collation_req_vstaging_receiver:
-		IncomingRequestReceiver<request_vstaging::CollationFetchingRequest>,
+	/// Collations request receiver for network protocol v2.
+	pub collation_req_v2_receiver: IncomingRequestReceiver<request_v2::CollationFetchingRequest>,
 	/// Receiver for available data requests.
 	pub available_data_req_receiver:
 		IncomingRequestReceiver<request_v1::AvailableDataFetchingRequest>,
 	/// Receiver for incoming large statement requests.
 	pub statement_req_receiver: IncomingRequestReceiver<request_v1::StatementFetchingRequest>,
 	/// Receiver for incoming candidate requests.
-	pub candidate_req_vstaging_receiver:
-		IncomingRequestReceiver<request_vstaging::AttestedCandidateRequest>,
+	pub candidate_req_v2_receiver: IncomingRequestReceiver<request_v2::AttestedCandidateRequest>,
 	/// Receiver for incoming disputes.
 	pub dispute_req_receiver: IncomingRequestReceiver<request_v1::DisputeRequest>,
 	/// Prometheus registry, commonly used for production systems, less so for test.
@@ -158,10 +156,10 @@ pub fn prepared_overseer_builder<Spawner, RuntimeClient>(
 		pov_req_receiver,
 		chunk_req_receiver,
 		collation_req_v1_receiver,
-		collation_req_vstaging_receiver,
+		collation_req_v2_receiver,
 		available_data_req_receiver,
 		statement_req_receiver,
-		candidate_req_vstaging_receiver,
+		candidate_req_v2_receiver,
 		dispute_req_receiver,
 		registry,
 		spawner,
@@ -288,7 +286,7 @@ where
 					peer_id: network_service.local_peer_id(),
 					collator_pair,
 					request_receiver_v1: collation_req_v1_receiver,
-					request_receiver_vstaging: collation_req_vstaging_receiver,
+					request_receiver_v2: collation_req_v2_receiver,
 					metrics: Metrics::register(registry)?,
 				},
 				IsParachainNode::FullNode => ProtocolSide::None,
@@ -309,7 +307,7 @@ where
 		.statement_distribution(StatementDistributionSubsystem::new(
 			keystore.clone(),
 			statement_req_receiver,
-			candidate_req_vstaging_receiver,
+			candidate_req_v2_receiver,
 			Metrics::register(registry)?,
 			rand::rngs::StdRng::from_entropy(),
 		))
diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs
index a53908d3c2c..eb94f1696c9 100644
--- a/polkadot/node/subsystem-types/src/messages.rs
+++ b/polkadot/node/subsystem-types/src/messages.rs
@@ -39,12 +39,12 @@ use polkadot_node_primitives::{
 	ValidationResult,
 };
 use polkadot_primitives::{
-	slashing, vstaging as vstaging_primitives, AuthorityDiscoveryId, BackedCandidate, BlockNumber,
-	CandidateEvent, CandidateHash, CandidateIndex, CandidateReceipt, CollatorId,
-	CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupIndex,
-	GroupRotationInfo, Hash, Header as BlockHeader, Id as ParaId, InboundDownwardMessage,
-	InboundHrmpMessage, MultiDisputeStatementSet, OccupiedCoreAssumption, PersistedValidationData,
-	PvfCheckStatement, PvfExecTimeoutKind, SessionIndex, SessionInfo, SignedAvailabilityBitfield,
+	async_backing, slashing, AuthorityDiscoveryId, BackedCandidate, BlockNumber, CandidateEvent,
+	CandidateHash, CandidateIndex, CandidateReceipt, CollatorId, CommittedCandidateReceipt,
+	CoreState, DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash,
+	Header as BlockHeader, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage,
+	MultiDisputeStatementSet, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement,
+	PvfExecTimeoutKind, SessionIndex, SessionInfo, SignedAvailabilityBitfield,
 	SignedAvailabilityBitfields, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex,
 	ValidatorSignature,
 };
@@ -695,14 +695,12 @@ pub enum RuntimeApiRequest {
 	),
 	/// Get the minimum required backing votes.
 	MinimumBackingVotes(SessionIndex, RuntimeApiSender<u32>),
-
 	/// Get the backing state of the given para.
-	/// This is a staging API that will not be available on production runtimes.
-	StagingParaBackingState(ParaId, RuntimeApiSender<Option<vstaging_primitives::BackingState>>),
+	ParaBackingState(ParaId, RuntimeApiSender<Option<async_backing::BackingState>>),
 	/// Get candidate's acceptance limitations for asynchronous backing for a relay parent.
 	///
 	/// If it's not supported by the Runtime, the async backing is said to be disabled.
-	StagingAsyncBackingParams(RuntimeApiSender<vstaging_primitives::AsyncBackingParams>),
+	AsyncBackingParams(RuntimeApiSender<async_backing::AsyncBackingParams>),
 }
 
 impl RuntimeApiRequest {
@@ -726,10 +724,8 @@ impl RuntimeApiRequest {
 	/// `MinimumBackingVotes`
 	pub const MINIMUM_BACKING_VOTES_RUNTIME_REQUIREMENT: u32 = 6;
 
-	/// Minimum version for backing state, required for async backing.
-	///
-	/// 99 for now, should be adjusted to VSTAGING/actual runtime version once released.
-	pub const STAGING_BACKING_STATE: u32 = 99;
+	/// Minimum version to enable asynchronous backing: `AsyncBackingParams` and `ParaBackingState`.
+	pub const STAGING_BACKING_STATE: u32 = 7;
 }
 
 /// A message to the Runtime API subsystem.
diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs
index 06aa351efb4..3007e985b4f 100644
--- a/polkadot/node/subsystem-types/src/runtime_client.rs
+++ b/polkadot/node/subsystem-types/src/runtime_client.rs
@@ -16,9 +16,9 @@
 
 use async_trait::async_trait;
 use polkadot_primitives::{
-	runtime_api::ParachainHost, vstaging, Block, BlockNumber, CandidateCommitments, CandidateEvent,
-	CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams,
-	GroupRotationInfo, Hash, Id, InboundDownwardMessage, InboundHrmpMessage,
+	async_backing, runtime_api::ParachainHost, slashing, Block, BlockNumber, CandidateCommitments,
+	CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState,
+	ExecutorParams, GroupRotationInfo, Hash, Id, InboundDownwardMessage, InboundHrmpMessage,
 	OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes,
 	SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex,
 	ValidatorSignature,
@@ -190,7 +190,7 @@ pub trait RuntimeApiSubsystemClient {
 	async fn unapplied_slashes(
 		&self,
 		at: Hash,
-	) -> Result<Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>, ApiError>;
+	) -> Result<Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>, ApiError>;
 
 	/// Returns a merkle proof of a validator session key in a past session.
 	///
@@ -199,7 +199,7 @@ pub trait RuntimeApiSubsystemClient {
 		&self,
 		at: Hash,
 		validator_id: ValidatorId,
-	) -> Result<Option<vstaging::slashing::OpaqueKeyOwnershipProof>, ApiError>;
+	) -> Result<Option<slashing::OpaqueKeyOwnershipProof>, ApiError>;
 
 	/// Submits an unsigned extrinsic to slash validators who lost a dispute about
 	/// a candidate of a past session.
@@ -208,8 +208,8 @@ pub trait RuntimeApiSubsystemClient {
 	async fn submit_report_dispute_lost(
 		&self,
 		at: Hash,
-		dispute_proof: vstaging::slashing::DisputeProof,
-		key_ownership_proof: vstaging::slashing::OpaqueKeyOwnershipProof,
+		dispute_proof: slashing::DisputeProof,
+		key_ownership_proof: slashing::OpaqueKeyOwnershipProof,
 	) -> Result<Option<()>, ApiError>;
 
 	// === BABE API ===
@@ -232,7 +232,7 @@ pub trait RuntimeApiSubsystemClient {
 		session_index: SessionIndex,
 	) -> Result<Option<ExecutorParams>, ApiError>;
 
-	// === STAGING v6 ===
+	// === v6 ===
 	/// Get the minimum number of backing votes.
 	async fn minimum_backing_votes(
 		&self,
@@ -240,21 +240,21 @@ pub trait RuntimeApiSubsystemClient {
 		session_index: SessionIndex,
 	) -> Result<u32, ApiError>;
 
-	// === Asynchronous backing API ===
+	// === v7: Asynchronous backing API ===
 
 	/// Returns candidate's acceptance limitations for asynchronous backing for a relay parent.
-	async fn staging_async_backing_params(
+	async fn async_backing_params(
 		&self,
 		at: Hash,
-	) -> Result<polkadot_primitives::vstaging::AsyncBackingParams, ApiError>;
+	) -> Result<polkadot_primitives::AsyncBackingParams, ApiError>;
 
 	/// Returns the state of parachain backing for a given para.
 	/// This is a staging method! Do not use on production runtimes!
-	async fn staging_para_backing_state(
+	async fn para_backing_state(
 		&self,
 		at: Hash,
 		para_id: Id,
-	) -> Result<Option<polkadot_primitives::vstaging::BackingState>, ApiError>;
+	) -> Result<Option<async_backing::BackingState>, ApiError>;
 }
 
 /// Default implementation of [`RuntimeApiSubsystemClient`] using the client.
@@ -454,7 +454,7 @@ where
 	async fn unapplied_slashes(
 		&self,
 		at: Hash,
-	) -> Result<Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>, ApiError> {
+	) -> Result<Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>, ApiError> {
 		self.client.runtime_api().unapplied_slashes(at)
 	}
 
@@ -462,15 +462,15 @@ where
 		&self,
 		at: Hash,
 		validator_id: ValidatorId,
-	) -> Result<Option<vstaging::slashing::OpaqueKeyOwnershipProof>, ApiError> {
+	) -> Result<Option<slashing::OpaqueKeyOwnershipProof>, ApiError> {
 		self.client.runtime_api().key_ownership_proof(at, validator_id)
 	}
 
 	async fn submit_report_dispute_lost(
 		&self,
 		at: Hash,
-		dispute_proof: vstaging::slashing::DisputeProof,
-		key_ownership_proof: vstaging::slashing::OpaqueKeyOwnershipProof,
+		dispute_proof: slashing::DisputeProof,
+		key_ownership_proof: slashing::OpaqueKeyOwnershipProof,
 	) -> Result<Option<()>, ApiError> {
 		let mut runtime_api = self.client.runtime_api();
 
@@ -489,19 +489,19 @@ where
 		self.client.runtime_api().minimum_backing_votes(at)
 	}
 
-	async fn staging_para_backing_state(
+	async fn para_backing_state(
 		&self,
 		at: Hash,
 		para_id: Id,
-	) -> Result<Option<polkadot_primitives::vstaging::BackingState>, ApiError> {
-		self.client.runtime_api().staging_para_backing_state(at, para_id)
+	) -> Result<Option<async_backing::BackingState>, ApiError> {
+		self.client.runtime_api().para_backing_state(at, para_id)
 	}
 
 	/// Returns candidate's acceptance limitations for asynchronous backing for a relay parent.
-	async fn staging_async_backing_params(
+	async fn async_backing_params(
 		&self,
 		at: Hash,
-	) -> Result<polkadot_primitives::vstaging::AsyncBackingParams, ApiError> {
-		self.client.runtime_api().staging_async_backing_params(at)
+	) -> Result<async_backing::AsyncBackingParams, ApiError> {
+		self.client.runtime_api().async_backing_params(at)
 	}
 }
diff --git a/polkadot/node/subsystem-util/src/backing_implicit_view.rs b/polkadot/node/subsystem-util/src/backing_implicit_view.rs
index 83c15fdef95..a14536a1766 100644
--- a/polkadot/node/subsystem-util/src/backing_implicit_view.rs
+++ b/polkadot/node/subsystem-util/src/backing_implicit_view.rs
@@ -20,7 +20,7 @@ use polkadot_node_subsystem::{
 	messages::{ChainApiMessage, ProspectiveParachainsMessage},
 	SubsystemSender,
 };
-use polkadot_primitives::vstaging::{BlockNumber, Hash, Id as ParaId};
+use polkadot_primitives::{BlockNumber, Hash, Id as ParaId};
 
 use std::collections::HashMap;
 
diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs
index 1487077d9ed..c7b91bffb3d 100644
--- a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs
+++ b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs
@@ -11,4 +11,1437 @@
 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 // GNU General Public License for more details.
 
-pub mod staging;
+/// # Overview
+///
+/// A set of utilities for node-side code to emulate the logic the runtime uses for checking
+/// parachain blocks in order to build prospective parachains that are produced ahead of the
+/// relay chain. These utilities allow the node-side to predict, with high accuracy, what
+/// the relay-chain will accept in the near future.
+///
+/// This module has 2 key data types: [`Constraints`] and [`Fragment`]s. [`Constraints`]
+/// exhaustively define the set of valid inputs and outputs to parachain execution. A
+/// [`Fragment`] indicates a parachain block, anchored to the relay-chain at a particular
+/// relay-chain block, known as the relay-parent.
+///
+/// ## Fragment Validity
+///
+/// Every relay-parent is implicitly associated with a unique set of [`Constraints`] that
+/// describe the properties that must be true for a block to be included in a direct child of
+/// that block, assuming there is no intermediate parachain block pending availability.
+///
+/// However, the key factor that makes asynchronously-grown prospective chains
+/// possible is the fact that the relay-chain accepts candidate blocks based on whether they
+/// are valid under the constraints of the present moment, not based on whether they were
+/// valid at the time of construction.
+///
+/// As such, [`Fragment`]s are often, but not always, constructed in such a way that they are
+/// invalid at first and become valid later on, as the relay-chain grows.
+///
+/// # Usage
+///
+/// It's expected that the users of this module will be building up trees of
+/// [`Fragment`]s and consistently pruning and adding to the tree.
+///
+/// ## Operating Constraints
+///
+/// The *operating constraints* of a `Fragment` are the constraints with which that fragment
+/// was intended to comply. The operating constraints are defined as the base constraints
+/// of the relay-parent of the fragment modified by the cumulative modifications of all
+/// fragments between the relay-parent and the current fragment.
+///
+/// What the operating constraints are, in practice, is a prediction about the state of the
+/// relay-chain in the future. The relay-chain is aware of some current state, and we want to
+/// make an intelligent prediction about what might be accepted in the future based on
+/// prior fragments that also exist off-chain.
+///
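+/// As a minimal sketch, the operating constraints can be computed by stacking the modifications
+/// of all intermediate fragments and applying them to the relay-parent's base constraints. The
+/// `operating_constraints` helper below is hypothetical; `Constraints`,
+/// `ConstraintModifications` and their methods are the ones defined in this module.
+///
+/// ```ignore
+/// fn operating_constraints(
+/// 	base: &Constraints,
+/// 	intermediate: &[ConstraintModifications],
+/// ) -> Result<Constraints, ModificationError> {
+/// 	// Accumulate the modifications of every intermediate fragment, oldest first.
+/// 	let mut cumulative = ConstraintModifications::identity();
+/// 	for m in intermediate {
+/// 		cumulative.stack(m);
+/// 	}
+/// 	// Applying the accumulated modifications yields the operating constraints.
+/// 	base.apply_modifications(&cumulative)
+/// }
+/// ```
+///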
+/// ## Fragment Trees
+///
+/// As the relay-chain grows, some predictions come true, others turn out to be false, and new
+/// predictions get made. These three changes correspond to the three primary operations on
+/// fragment trees.
+///
+/// A fragment tree is a mental model for thinking about a forking series of predictions
+/// about a single parachain. There may be one or more fragment trees per parachain.
+///
+/// In expectation, most parachains will have a plausibly-unique authorship method which means
+/// that they should really be much closer to fragment-chains, maybe with an occasional fork.
+///
+/// Avoiding fragment-tree blowup is beyond the scope of this module.
+///
+/// ### Pruning Fragment Trees
+///
+/// When the relay-chain advances, we want to compare the new constraints of that relay-parent
+/// to the roots of the fragment trees we have. There are 3 cases:
+///
+/// 1. The root fragment is still valid under the new constraints. In this case, we do nothing.
+///    This is the "prediction still uncertain" case.
+///
+/// 2. The root fragment is invalid under the new constraints because it has been subsumed by
+///    the relay-chain. In this case, we can discard the root and split & re-root the fragment
+///    tree under its descendents and compare to the new constraints again. This is the
+///    "prediction came true" case.
+///
+/// 3. The root fragment is invalid under the new constraints because a competing parachain
+///    block has been included or it would never be accepted for some other reason. In this
+///    case we can discard the entire fragment tree. This is the "prediction came false" case.
+///
+/// This is all a bit of a simplification because it assumes that the relay-chain advances
+/// without forks and is finalized instantly. In practice, the set of fragment-trees needs to
+/// be observable from the perspective of a few different possible forks of the relay-chain and
+/// not pruned too eagerly.
+///
+/// Note that the fragments themselves don't need to change and the only thing we care about
+/// is whether the predictions they represent are still valid.
+///
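+/// The three cases above can be sketched roughly as follows. Everything here is hypothetical
+/// glue (`RootAction`, `prune_root`, `was_included`): deciding whether a candidate was actually
+/// subsumed by the relay-chain happens at a higher level, and `check_modifications` is only a
+/// crude stand-in for full fragment validation.
+///
+/// ```ignore
+/// enum RootAction {
+/// 	Keep,           // case 1: prediction still uncertain
+/// 	ReRootChildren, // case 2: prediction came true
+/// 	DiscardTree,    // case 3: prediction came false
+/// }
+///
+/// fn prune_root(
+/// 	new_base: &Constraints,
+/// 	root_modifications: &ConstraintModifications,
+/// 	was_included: bool,
+/// ) -> RootAction {
+/// 	if was_included {
+/// 		RootAction::ReRootChildren
+/// 	} else if new_base.check_modifications(root_modifications).is_ok() {
+/// 		RootAction::Keep
+/// 	} else {
+/// 		RootAction::DiscardTree
+/// 	}
+/// }
+/// ```
+///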
+/// ### Extending Fragment Trees
+///
+/// As predictions fade into the past, new ones should be stacked on top.
+///
+/// Every new relay-chain block is an opportunity to make a new prediction about the future.
+/// Higher-level logic should select which leaves of the fragment-trees to build upon, or
+/// whether to create a new fragment-tree.
+///
+/// ### Code Upgrades
+///
+/// Code upgrades are the main place where this emulation fails. The on-chain PVF upgrade
+/// scheduling logic is very path-dependent and intricate, so we just assume that code upgrades
+/// can't be initiated and applied within a single fragment-tree. Fragment-trees aren't deep
+/// in practice, and code upgrades are fairly rare. So what's likely to happen around code
+/// upgrades is that the entire fragment-tree has to get discarded at some point.
+///
+/// That means a few blocks of execution time lost, which is not a big deal for code upgrades
+/// that happen, in practice, at most once every few weeks.
+use polkadot_primitives::{
+	async_backing::Constraints as PrimitiveConstraints, BlockNumber, CandidateCommitments,
+	CollatorId, CollatorSignature, Hash, HeadData, Id as ParaId, PersistedValidationData,
+	UpgradeRestriction, ValidationCodeHash,
+};
+use std::{
+	borrow::{Borrow, Cow},
+	collections::HashMap,
+};
+
+/// Constraints on inbound HRMP channels.
+#[derive(Debug, Clone, PartialEq)]
+pub struct InboundHrmpLimitations {
+	/// An exhaustive set of all valid watermarks, sorted ascending
+	pub valid_watermarks: Vec<BlockNumber>,
+}
+
+/// Constraints on outbound HRMP channels.
+#[derive(Debug, Clone, PartialEq)]
+pub struct OutboundHrmpChannelLimitations {
+	/// The maximum bytes that can be written to the channel.
+	pub bytes_remaining: usize,
+	/// The maximum messages that can be written to the channel.
+	pub messages_remaining: usize,
+}
+
+/// Constraints on the actions that can be taken by a new parachain
+/// block. These limitations are implicitly associated with some particular
+/// parachain, which should be apparent from usage.
+#[derive(Debug, Clone, PartialEq)]
+pub struct Constraints {
+	/// The minimum relay-parent number accepted under these constraints.
+	pub min_relay_parent_number: BlockNumber,
+	/// The maximum Proof-of-Validity size allowed, in bytes.
+	pub max_pov_size: usize,
+	/// The maximum new validation code size allowed, in bytes.
+	pub max_code_size: usize,
+	/// The amount of UMP messages remaining.
+	pub ump_remaining: usize,
+	/// The amount of UMP bytes remaining.
+	pub ump_remaining_bytes: usize,
+	/// The maximum number of UMP messages allowed per candidate.
+	pub max_ump_num_per_candidate: usize,
+	/// Remaining DMP queue. Only includes sent-at block numbers.
+	pub dmp_remaining_messages: Vec<BlockNumber>,
+	/// The limitations of all registered inbound HRMP channels.
+	pub hrmp_inbound: InboundHrmpLimitations,
+	/// The limitations of all registered outbound HRMP channels.
+	pub hrmp_channels_out: HashMap<ParaId, OutboundHrmpChannelLimitations>,
+	/// The maximum number of HRMP messages allowed per candidate.
+	pub max_hrmp_num_per_candidate: usize,
+	/// The required parent head-data of the parachain.
+	pub required_parent: HeadData,
+	/// The expected validation-code-hash of this parachain.
+	pub validation_code_hash: ValidationCodeHash,
+	/// The code upgrade restriction signal as-of this parachain.
+	pub upgrade_restriction: Option<UpgradeRestriction>,
+	/// The future validation code hash, if any, and at what relay-parent
+	/// number the upgrade would be minimally applied.
+	pub future_validation_code: Option<(BlockNumber, ValidationCodeHash)>,
+}
+
+impl From<PrimitiveConstraints> for Constraints {
+	fn from(c: PrimitiveConstraints) -> Self {
+		Constraints {
+			min_relay_parent_number: c.min_relay_parent_number,
+			max_pov_size: c.max_pov_size as _,
+			max_code_size: c.max_code_size as _,
+			ump_remaining: c.ump_remaining as _,
+			ump_remaining_bytes: c.ump_remaining_bytes as _,
+			max_ump_num_per_candidate: c.max_ump_num_per_candidate as _,
+			dmp_remaining_messages: c.dmp_remaining_messages,
+			hrmp_inbound: InboundHrmpLimitations {
+				valid_watermarks: c.hrmp_inbound.valid_watermarks,
+			},
+			hrmp_channels_out: c
+				.hrmp_channels_out
+				.into_iter()
+				.map(|(para_id, limits)| {
+					(
+						para_id,
+						OutboundHrmpChannelLimitations {
+							bytes_remaining: limits.bytes_remaining as _,
+							messages_remaining: limits.messages_remaining as _,
+						},
+					)
+				})
+				.collect(),
+			max_hrmp_num_per_candidate: c.max_hrmp_num_per_candidate as _,
+			required_parent: c.required_parent,
+			validation_code_hash: c.validation_code_hash,
+			upgrade_restriction: c.upgrade_restriction,
+			future_validation_code: c.future_validation_code,
+		}
+	}
+}
+
+/// Kinds of errors that can occur when modifying constraints.
+#[derive(Debug, Clone, PartialEq)]
+pub enum ModificationError {
+	/// The HRMP watermark is not allowed.
+	DisallowedHrmpWatermark(BlockNumber),
+	/// No such HRMP outbound channel.
+	NoSuchHrmpChannel(ParaId),
+	/// Too many messages submitted to HRMP channel.
+	HrmpMessagesOverflow {
+		/// The ID of the recipient.
+		para_id: ParaId,
+		/// The amount of remaining messages in the capacity of the channel.
+		messages_remaining: usize,
+		/// The amount of messages submitted to the channel.
+		messages_submitted: usize,
+	},
+	/// Too many bytes submitted to HRMP channel.
+	HrmpBytesOverflow {
+		/// The ID of the recipient.
+		para_id: ParaId,
+		/// The amount of remaining bytes in the capacity of the channel.
+		bytes_remaining: usize,
+		/// The amount of bytes submitted to the channel.
+		bytes_submitted: usize,
+	},
+	/// Too many messages submitted to UMP.
+	UmpMessagesOverflow {
+		/// The amount of remaining messages in the capacity of UMP.
+		messages_remaining: usize,
+		/// The amount of messages submitted to UMP.
+		messages_submitted: usize,
+	},
+	/// Too many bytes submitted to UMP.
+	UmpBytesOverflow {
+		/// The amount of remaining bytes in the capacity of UMP.
+		bytes_remaining: usize,
+		/// The amount of bytes submitted to UMP.
+		bytes_submitted: usize,
+	},
+	/// Too many messages processed from DMP.
+	DmpMessagesUnderflow {
+		/// The amount of messages waiting to be processed from DMP.
+		messages_remaining: usize,
+		/// The amount of messages processed.
+		messages_processed: usize,
+	},
+	/// No validation code upgrade to apply.
+	AppliedNonexistentCodeUpgrade,
+}
+
+impl Constraints {
+	/// Check modifications against constraints.
+	pub fn check_modifications(
+		&self,
+		modifications: &ConstraintModifications,
+	) -> Result<(), ModificationError> {
+		if let Some(HrmpWatermarkUpdate::Trunk(hrmp_watermark)) = modifications.hrmp_watermark {
+			// head updates are always valid.
+			if self.hrmp_inbound.valid_watermarks.iter().all(|w| w != &hrmp_watermark) {
+				return Err(ModificationError::DisallowedHrmpWatermark(hrmp_watermark))
+			}
+		}
+
+		for (id, outbound_hrmp_mod) in &modifications.outbound_hrmp {
+			if let Some(outbound) = self.hrmp_channels_out.get(&id) {
+				outbound.bytes_remaining.checked_sub(outbound_hrmp_mod.bytes_submitted).ok_or(
+					ModificationError::HrmpBytesOverflow {
+						para_id: *id,
+						bytes_remaining: outbound.bytes_remaining,
+						bytes_submitted: outbound_hrmp_mod.bytes_submitted,
+					},
+				)?;
+
+				outbound
+					.messages_remaining
+					.checked_sub(outbound_hrmp_mod.messages_submitted)
+					.ok_or(ModificationError::HrmpMessagesOverflow {
+						para_id: *id,
+						messages_remaining: outbound.messages_remaining,
+						messages_submitted: outbound_hrmp_mod.messages_submitted,
+					})?;
+			} else {
+				return Err(ModificationError::NoSuchHrmpChannel(*id))
+			}
+		}
+
+		self.ump_remaining.checked_sub(modifications.ump_messages_sent).ok_or(
+			ModificationError::UmpMessagesOverflow {
+				messages_remaining: self.ump_remaining,
+				messages_submitted: modifications.ump_messages_sent,
+			},
+		)?;
+
+		self.ump_remaining_bytes.checked_sub(modifications.ump_bytes_sent).ok_or(
+			ModificationError::UmpBytesOverflow {
+				bytes_remaining: self.ump_remaining_bytes,
+				bytes_submitted: modifications.ump_bytes_sent,
+			},
+		)?;
+
+		self.dmp_remaining_messages
+			.len()
+			.checked_sub(modifications.dmp_messages_processed)
+			.ok_or(ModificationError::DmpMessagesUnderflow {
+				messages_remaining: self.dmp_remaining_messages.len(),
+				messages_processed: modifications.dmp_messages_processed,
+			})?;
+
+		if self.future_validation_code.is_none() && modifications.code_upgrade_applied {
+			return Err(ModificationError::AppliedNonexistentCodeUpgrade)
+		}
+
+		Ok(())
+	}
+
+	/// Apply modifications to these constraints. If this succeeds, it passes
+	/// all sanity-checks.
+	pub fn apply_modifications(
+		&self,
+		modifications: &ConstraintModifications,
+	) -> Result<Self, ModificationError> {
+		let mut new = self.clone();
+
+		if let Some(required_parent) = modifications.required_parent.as_ref() {
+			new.required_parent = required_parent.clone();
+		}
+
+		if let Some(ref hrmp_watermark) = modifications.hrmp_watermark {
+			match new.hrmp_inbound.valid_watermarks.binary_search(&hrmp_watermark.watermark()) {
+				Ok(pos) => {
+					// Exact match, so this is OK in all cases.
+					let _ = new.hrmp_inbound.valid_watermarks.drain(..pos + 1);
+				},
+				Err(pos) => match hrmp_watermark {
+					HrmpWatermarkUpdate::Head(_) => {
+						// Updates to Head are always OK.
+						let _ = new.hrmp_inbound.valid_watermarks.drain(..pos);
+					},
+					HrmpWatermarkUpdate::Trunk(n) => {
+						// Trunk update landing on disallowed watermark is not OK.
+						return Err(ModificationError::DisallowedHrmpWatermark(*n))
+					},
+				},
+			}
+		}
+
+		for (id, outbound_hrmp_mod) in &modifications.outbound_hrmp {
+			if let Some(outbound) = new.hrmp_channels_out.get_mut(&id) {
+				outbound.bytes_remaining = outbound
+					.bytes_remaining
+					.checked_sub(outbound_hrmp_mod.bytes_submitted)
+					.ok_or(ModificationError::HrmpBytesOverflow {
+						para_id: *id,
+						bytes_remaining: outbound.bytes_remaining,
+						bytes_submitted: outbound_hrmp_mod.bytes_submitted,
+					})?;
+
+				outbound.messages_remaining = outbound
+					.messages_remaining
+					.checked_sub(outbound_hrmp_mod.messages_submitted)
+					.ok_or(ModificationError::HrmpMessagesOverflow {
+						para_id: *id,
+						messages_remaining: outbound.messages_remaining,
+						messages_submitted: outbound_hrmp_mod.messages_submitted,
+					})?;
+			} else {
+				return Err(ModificationError::NoSuchHrmpChannel(*id))
+			}
+		}
+
+		new.ump_remaining = new.ump_remaining.checked_sub(modifications.ump_messages_sent).ok_or(
+			ModificationError::UmpMessagesOverflow {
+				messages_remaining: new.ump_remaining,
+				messages_submitted: modifications.ump_messages_sent,
+			},
+		)?;
+
+		new.ump_remaining_bytes = new
+			.ump_remaining_bytes
+			.checked_sub(modifications.ump_bytes_sent)
+			.ok_or(ModificationError::UmpBytesOverflow {
+				bytes_remaining: new.ump_remaining_bytes,
+				bytes_submitted: modifications.ump_bytes_sent,
+			})?;
+
+		if modifications.dmp_messages_processed > new.dmp_remaining_messages.len() {
+			return Err(ModificationError::DmpMessagesUnderflow {
+				messages_remaining: new.dmp_remaining_messages.len(),
+				messages_processed: modifications.dmp_messages_processed,
+			})
+		} else {
+			new.dmp_remaining_messages =
+				new.dmp_remaining_messages[modifications.dmp_messages_processed..].to_vec();
+		}
+
+		if modifications.code_upgrade_applied {
+			new.validation_code_hash = new
+				.future_validation_code
+				.take()
+				.ok_or(ModificationError::AppliedNonexistentCodeUpgrade)?
+				.1;
+		}
+
+		Ok(new)
+	}
+}
+
+/// Information about a relay-chain block.
+#[derive(Debug, Clone, PartialEq)]
+pub struct RelayChainBlockInfo {
+	/// The hash of the relay-chain block.
+	pub hash: Hash,
+	/// The number of the relay-chain block.
+	pub number: BlockNumber,
+	/// The storage-root of the relay-chain block.
+	pub storage_root: Hash,
+}
+
+/// An update to outbound HRMP channels.
+#[derive(Debug, Clone, PartialEq, Default)]
+pub struct OutboundHrmpChannelModification {
+	/// The number of bytes submitted to the channel.
+	pub bytes_submitted: usize,
+	/// The number of messages submitted to the channel.
+	pub messages_submitted: usize,
+}
+
+/// An update to the HRMP Watermark.
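+///
+/// As a worked example with hypothetical numbers: if the valid inbound watermarks are `[2, 5]`,
+/// then `Head(4)` is accepted, `Trunk(5)` is accepted, and `Trunk(4)` is rejected with
+/// `DisallowedHrmpWatermark(4)`.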
+#[derive(Debug, Clone, PartialEq)]
+pub enum HrmpWatermarkUpdate {
+	/// This is an update placing the watermark at the head of the chain,
+	/// which is always legal.
+	Head(BlockNumber),
+	/// This is an update placing the watermark behind the head of the
+	/// chain, which is only legal if it lands on a block where messages
+	/// were queued.
+	Trunk(BlockNumber),
+}
+
+impl HrmpWatermarkUpdate {
+	fn watermark(&self) -> BlockNumber {
+		match *self {
+			HrmpWatermarkUpdate::Head(n) | HrmpWatermarkUpdate::Trunk(n) => n,
+		}
+	}
+}
+
+/// Modifications to constraints as a result of prospective candidates.
+#[derive(Debug, Clone, PartialEq)]
+pub struct ConstraintModifications {
+	/// The required parent head to build upon.
+	pub required_parent: Option<HeadData>,
+	/// The new HRMP watermark
+	pub hrmp_watermark: Option<HrmpWatermarkUpdate>,
+	/// Outbound HRMP channel modifications.
+	pub outbound_hrmp: HashMap<ParaId, OutboundHrmpChannelModification>,
+	/// The amount of UMP messages sent.
+	pub ump_messages_sent: usize,
+	/// The amount of UMP bytes sent.
+	pub ump_bytes_sent: usize,
+	/// The amount of DMP messages processed.
+	pub dmp_messages_processed: usize,
+	/// Whether a pending code upgrade has been applied.
+	pub code_upgrade_applied: bool,
+}
+
+impl ConstraintModifications {
+	/// The 'identity' modifications: these can be applied to
+	/// any constraints and yield the exact same result.
+	pub fn identity() -> Self {
+		ConstraintModifications {
+			required_parent: None,
+			hrmp_watermark: None,
+			outbound_hrmp: HashMap::new(),
+			ump_messages_sent: 0,
+			ump_bytes_sent: 0,
+			dmp_messages_processed: 0,
+			code_upgrade_applied: false,
+		}
+	}
+
+	/// Stack other modifications on top of these.
+	///
+	/// This does no sanity-checking, so if `other` is garbage relative
+	/// to `self`, then the new value will be garbage as well.
+	///
+	/// This is an addition which is not commutative.
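+	///
+	/// A brief usage sketch; `mods_a` and `mods_b` stand for the modifications of two
+	/// consecutive fragments and are hypothetical names:
+	///
+	/// ```ignore
+	/// let mut cumulative = ConstraintModifications::identity();
+	/// cumulative.stack(&mods_a);
+	/// cumulative.stack(&mods_b);
+	/// // `cumulative` now reflects `mods_a` applied first, then `mods_b`.
+	/// ```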
+	pub fn stack(&mut self, other: &Self) {
+		if let Some(ref new_parent) = other.required_parent {
+			self.required_parent = Some(new_parent.clone());
+		}
+		if let Some(ref new_hrmp_watermark) = other.hrmp_watermark {
+			self.hrmp_watermark = Some(new_hrmp_watermark.clone());
+		}
+
+		for (id, mods) in &other.outbound_hrmp {
+			let record = self.outbound_hrmp.entry(*id).or_default();
+			record.messages_submitted += mods.messages_submitted;
+			record.bytes_submitted += mods.bytes_submitted;
+		}
+
+		self.ump_messages_sent += other.ump_messages_sent;
+		self.ump_bytes_sent += other.ump_bytes_sent;
+		self.dmp_messages_processed += other.dmp_messages_processed;
+		self.code_upgrade_applied |= other.code_upgrade_applied;
+	}
+}
+
+/// The prospective candidate.
+///
+/// This comprises the key information that represents a candidate
+/// without pinning it to a particular session. For example, everything
+/// to do with the collator's signature and commitments is represented
+/// here. But the erasure-root is not. This means that prospective candidates
+/// are not correlated to any session in particular.
+#[derive(Debug, Clone, PartialEq)]
+pub struct ProspectiveCandidate<'a> {
+	/// The commitments to the output of the execution.
+	pub commitments: Cow<'a, CandidateCommitments>,
+	/// The collator that created the candidate.
+	pub collator: CollatorId,
+	/// The signature of the collator on the payload.
+	pub collator_signature: CollatorSignature,
+	/// The persisted validation data used to create the candidate.
+	pub persisted_validation_data: PersistedValidationData,
+	/// The hash of the PoV.
+	pub pov_hash: Hash,
+	/// The validation code hash used by the candidate.
+	pub validation_code_hash: ValidationCodeHash,
+}
+
+impl<'a> ProspectiveCandidate<'a> {
+	fn into_owned(self) -> ProspectiveCandidate<'static> {
+		ProspectiveCandidate { commitments: Cow::Owned(self.commitments.into_owned()), ..self }
+	}
+
+	/// Partially clone the prospective candidate, but borrow the
+	/// parts which are potentially heavy.
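+	///
+	/// A minimal sketch (illustrative only): the returned value borrows the
+	/// commitments, so deep-copying them is avoided.
+	///
+	/// ```ignore
+	/// let borrowed = candidate.partial_clone();
+	/// assert_eq!(borrowed.pov_hash, candidate.pov_hash);
+	/// ```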
+	pub fn partial_clone(&self) -> ProspectiveCandidate {
+		ProspectiveCandidate {
+			commitments: Cow::Borrowed(self.commitments.borrow()),
+			collator: self.collator.clone(),
+			collator_signature: self.collator_signature.clone(),
+			persisted_validation_data: self.persisted_validation_data.clone(),
+			pov_hash: self.pov_hash,
+			validation_code_hash: self.validation_code_hash,
+		}
+	}
+}
+
+#[cfg(test)]
+impl ProspectiveCandidate<'static> {
+	fn commitments_mut(&mut self) -> &mut CandidateCommitments {
+		self.commitments.to_mut()
+	}
+}
+
+/// Kinds of errors with the validity of a fragment.
+#[derive(Debug, Clone, PartialEq)]
+pub enum FragmentValidityError {
+	/// The validation code of the candidate doesn't match the
+	/// operating constraints.
+	///
+	/// Expected, Got
+	ValidationCodeMismatch(ValidationCodeHash, ValidationCodeHash),
+	/// The persisted-validation-data doesn't match.
+	///
+	/// Expected, Got
+	PersistedValidationDataMismatch(PersistedValidationData, PersistedValidationData),
+	/// The outputs of the candidate are invalid under the operating
+	/// constraints.
+	OutputsInvalid(ModificationError),
+	/// New validation code size too big.
+	///
+	/// Max allowed, new.
+	CodeSizeTooLarge(usize, usize),
+	/// Relay parent too old.
+	///
+	/// Min allowed, current.
+	RelayParentTooOld(BlockNumber, BlockNumber),
+	/// Para is required to process at least one DMP message from the queue.
+	DmpAdvancementRule,
+	/// Too many upward messages submitted.
+	UmpMessagesPerCandidateOverflow {
+		/// The number of upward messages a single candidate can submit.
+		messages_allowed: usize,
+		/// The number of upward messages submitted by the candidate.
+		messages_submitted: usize,
+	},
+	/// Too many messages submitted to all HRMP channels.
+	HrmpMessagesPerCandidateOverflow {
+		/// The amount of messages a single candidate can submit.
+		messages_allowed: usize,
+		/// The amount of messages sent to all HRMP channels.
+		messages_submitted: usize,
+	},
+	/// Code upgrade not allowed.
+	CodeUpgradeRestricted,
+	/// HRMP messages are not ascending or are duplicate.
+	///
+	/// The `usize` is the index into the outbound HRMP messages of
+	/// the candidate.
+	HrmpMessagesDescendingOrDuplicate(usize),
+}
+
+/// A parachain fragment, representing another prospective parachain block.
+///
+/// This is a type which guarantees that the candidate is valid under the
+/// operating constraints.
+#[derive(Debug, Clone, PartialEq)]
+pub struct Fragment<'a> {
+	/// The new relay-parent.
+	relay_parent: RelayChainBlockInfo,
+	/// The constraints this fragment is operating under.
+	operating_constraints: Constraints,
+	/// The core information about the prospective candidate.
+	candidate: ProspectiveCandidate<'a>,
+	/// Modifications to the constraints based on the outputs of
+	/// the candidate.
+	modifications: ConstraintModifications,
+}
+
+impl<'a> Fragment<'a> {
+	/// Create a new fragment.
+	///
+	/// This fails if the fragment isn't in line with the operating
+	/// constraints. That is, either its inputs or its outputs fail
+	/// checks against the constraints.
+	///
+	/// This doesn't check that the collator signature is valid or
+	/// whether the PoV is small enough.
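+	///
+	/// A minimal sketch of use (assuming `relay_parent`, `constraints` and
+	/// `candidate` were assembled elsewhere):
+	///
+	/// ```ignore
+	/// match Fragment::new(relay_parent, constraints, candidate) {
+	///     Ok(fragment) => {
+	///         // Outputs were checked against the constraints; the resulting
+	///         // modifications can be stacked for a child fragment.
+	///         let _mods = fragment.constraint_modifications();
+	///     },
+	///     Err(_err) => {
+	///         // e.g. `FragmentValidityError::RelayParentTooOld(min, got)`.
+	///     },
+	/// }
+	/// ```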
+	pub fn new(
+		relay_parent: RelayChainBlockInfo,
+		operating_constraints: Constraints,
+		candidate: ProspectiveCandidate<'a>,
+	) -> Result<Self, FragmentValidityError> {
+		let modifications = {
+			let commitments = &candidate.commitments;
+			ConstraintModifications {
+				required_parent: Some(commitments.head_data.clone()),
+				hrmp_watermark: Some({
+					if commitments.hrmp_watermark == relay_parent.number {
+						HrmpWatermarkUpdate::Head(commitments.hrmp_watermark)
+					} else {
+						HrmpWatermarkUpdate::Trunk(commitments.hrmp_watermark)
+					}
+				}),
+				outbound_hrmp: {
+					let mut outbound_hrmp = HashMap::<_, OutboundHrmpChannelModification>::new();
+
+					let mut last_recipient = None::<ParaId>;
+					for (i, message) in commitments.horizontal_messages.iter().enumerate() {
+						if let Some(last) = last_recipient {
+							if last >= message.recipient {
+								return Err(
+									FragmentValidityError::HrmpMessagesDescendingOrDuplicate(i),
+								)
+							}
+						}
+
+						last_recipient = Some(message.recipient);
+						let record = outbound_hrmp.entry(message.recipient).or_default();
+
+						record.bytes_submitted += message.data.len();
+						record.messages_submitted += 1;
+					}
+
+					outbound_hrmp
+				},
+				ump_messages_sent: commitments.upward_messages.len(),
+				ump_bytes_sent: commitments.upward_messages.iter().map(|msg| msg.len()).sum(),
+				dmp_messages_processed: commitments.processed_downward_messages as _,
+				code_upgrade_applied: operating_constraints
+					.future_validation_code
+					.map_or(false, |(at, _)| relay_parent.number >= at),
+			}
+		};
+
+		validate_against_constraints(
+			&operating_constraints,
+			&relay_parent,
+			&candidate,
+			&modifications,
+		)?;
+
+		Ok(Fragment { relay_parent, operating_constraints, candidate, modifications })
+	}
+
+	/// Access the relay parent information.
+	pub fn relay_parent(&self) -> &RelayChainBlockInfo {
+		&self.relay_parent
+	}
+
+	/// Access the operating constraints.
+	pub fn operating_constraints(&self) -> &Constraints {
+		&self.operating_constraints
+	}
+
+	/// Access the underlying prospective candidate.
+	pub fn candidate(&self) -> &ProspectiveCandidate<'a> {
+		&self.candidate
+	}
+
+	/// Modifications to constraints based on the outputs of the candidate.
+	pub fn constraint_modifications(&self) -> &ConstraintModifications {
+		&self.modifications
+	}
+
+	/// Convert the fragment into an owned variant.
+	pub fn into_owned(self) -> Fragment<'static> {
+		Fragment { candidate: self.candidate.into_owned(), ..self }
+	}
+
+	/// Validate this fragment against some set of constraints
+	/// instead of the operating constraints.
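+	///
+	/// A minimal sketch (assuming `new_constraints` were fetched for a more
+	/// recent relay-parent):
+	///
+	/// ```ignore
+	/// if fragment.validate_against_constraints(&new_constraints).is_err() {
+	///     // The prediction this fragment represents no longer holds; prune it.
+	/// }
+	/// ```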
+	pub fn validate_against_constraints(
+		&self,
+		constraints: &Constraints,
+	) -> Result<(), FragmentValidityError> {
+		validate_against_constraints(
+			constraints,
+			&self.relay_parent,
+			&self.candidate,
+			&self.modifications,
+		)
+	}
+}
+
+fn validate_against_constraints(
+	constraints: &Constraints,
+	relay_parent: &RelayChainBlockInfo,
+	candidate: &ProspectiveCandidate,
+	modifications: &ConstraintModifications,
+) -> Result<(), FragmentValidityError> {
+	let expected_pvd = PersistedValidationData {
+		parent_head: constraints.required_parent.clone(),
+		relay_parent_number: relay_parent.number,
+		relay_parent_storage_root: relay_parent.storage_root,
+		max_pov_size: constraints.max_pov_size as u32,
+	};
+
+	if expected_pvd != candidate.persisted_validation_data {
+		return Err(FragmentValidityError::PersistedValidationDataMismatch(
+			expected_pvd,
+			candidate.persisted_validation_data.clone(),
+		))
+	}
+
+	if constraints.validation_code_hash != candidate.validation_code_hash {
+		return Err(FragmentValidityError::ValidationCodeMismatch(
+			constraints.validation_code_hash,
+			candidate.validation_code_hash,
+		))
+	}
+
+	if relay_parent.number < constraints.min_relay_parent_number {
+		return Err(FragmentValidityError::RelayParentTooOld(
+			constraints.min_relay_parent_number,
+			relay_parent.number,
+		))
+	}
+
+	if candidate.commitments.new_validation_code.is_some() {
+		match constraints.upgrade_restriction {
+			None => {},
+			Some(UpgradeRestriction::Present) =>
+				return Err(FragmentValidityError::CodeUpgradeRestricted),
+		}
+	}
+
+	let announced_code_size = candidate
+		.commitments
+		.new_validation_code
+		.as_ref()
+		.map_or(0, |code| code.0.len());
+
+	if announced_code_size > constraints.max_code_size {
+		return Err(FragmentValidityError::CodeSizeTooLarge(
+			constraints.max_code_size,
+			announced_code_size,
+		))
+	}
+
+	if modifications.dmp_messages_processed == 0 {
+		if constraints
+			.dmp_remaining_messages
+			.get(0)
+			.map_or(false, |&msg_sent_at| msg_sent_at <= relay_parent.number)
+		{
+			return Err(FragmentValidityError::DmpAdvancementRule)
+		}
+	}
+
+	if candidate.commitments.horizontal_messages.len() > constraints.max_hrmp_num_per_candidate {
+		return Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow {
+			messages_allowed: constraints.max_hrmp_num_per_candidate,
+			messages_submitted: candidate.commitments.horizontal_messages.len(),
+		})
+	}
+
+	if candidate.commitments.upward_messages.len() > constraints.max_ump_num_per_candidate {
+		return Err(FragmentValidityError::UmpMessagesPerCandidateOverflow {
+			messages_allowed: constraints.max_ump_num_per_candidate,
+			messages_submitted: candidate.commitments.upward_messages.len(),
+		})
+	}
+
+	constraints
+		.check_modifications(&modifications)
+		.map_err(FragmentValidityError::OutputsInvalid)
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use polkadot_primitives::{
+		CollatorPair, HorizontalMessages, OutboundHrmpMessage, ValidationCode,
+	};
+	use sp_application_crypto::Pair;
+
+	#[test]
+	fn stack_modifications() {
+		let para_a = ParaId::from(1u32);
+		let para_b = ParaId::from(2u32);
+		let para_c = ParaId::from(3u32);
+
+		let a = ConstraintModifications {
+			required_parent: None,
+			hrmp_watermark: None,
+			outbound_hrmp: {
+				let mut map = HashMap::new();
+				map.insert(
+					para_a,
+					OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 },
+				);
+
+				map.insert(
+					para_b,
+					OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 },
+				);
+
+				map
+			},
+			ump_messages_sent: 6,
+			ump_bytes_sent: 1000,
+			dmp_messages_processed: 5,
+			code_upgrade_applied: true,
+		};
+
+		let b = ConstraintModifications {
+			required_parent: None,
+			hrmp_watermark: None,
+			outbound_hrmp: {
+				let mut map = HashMap::new();
+				map.insert(
+					para_b,
+					OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 },
+				);
+
+				map.insert(
+					para_c,
+					OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 },
+				);
+
+				map
+			},
+			ump_messages_sent: 6,
+			ump_bytes_sent: 1000,
+			dmp_messages_processed: 5,
+			code_upgrade_applied: true,
+		};
+
+		let mut c = a.clone();
+		c.stack(&b);
+
+		assert_eq!(
+			c,
+			ConstraintModifications {
+				required_parent: None,
+				hrmp_watermark: None,
+				outbound_hrmp: {
+					let mut map = HashMap::new();
+					map.insert(
+						para_a,
+						OutboundHrmpChannelModification {
+							bytes_submitted: 100,
+							messages_submitted: 5,
+						},
+					);
+
+					map.insert(
+						para_b,
+						OutboundHrmpChannelModification {
+							bytes_submitted: 200,
+							messages_submitted: 10,
+						},
+					);
+
+					map.insert(
+						para_c,
+						OutboundHrmpChannelModification {
+							bytes_submitted: 100,
+							messages_submitted: 5,
+						},
+					);
+
+					map
+				},
+				ump_messages_sent: 12,
+				ump_bytes_sent: 2000,
+				dmp_messages_processed: 10,
+				code_upgrade_applied: true,
+			},
+		);
+
+		let mut d = ConstraintModifications::identity();
+		d.stack(&a);
+		d.stack(&b);
+
+		assert_eq!(c, d);
+	}
+
+	fn make_constraints() -> Constraints {
+		let para_a = ParaId::from(1u32);
+		let para_b = ParaId::from(2u32);
+		let para_c = ParaId::from(3u32);
+
+		Constraints {
+			min_relay_parent_number: 5,
+			max_pov_size: 1000,
+			max_code_size: 1000,
+			ump_remaining: 10,
+			ump_remaining_bytes: 1024,
+			max_ump_num_per_candidate: 5,
+			dmp_remaining_messages: Vec::new(),
+			hrmp_inbound: InboundHrmpLimitations { valid_watermarks: vec![6, 8] },
+			hrmp_channels_out: {
+				let mut map = HashMap::new();
+
+				map.insert(
+					para_a,
+					OutboundHrmpChannelLimitations { messages_remaining: 5, bytes_remaining: 512 },
+				);
+
+				map.insert(
+					para_b,
+					OutboundHrmpChannelLimitations {
+						messages_remaining: 10,
+						bytes_remaining: 1024,
+					},
+				);
+
+				map.insert(
+					para_c,
+					OutboundHrmpChannelLimitations { messages_remaining: 1, bytes_remaining: 128 },
+				);
+
+				map
+			},
+			max_hrmp_num_per_candidate: 5,
+			required_parent: HeadData::from(vec![1, 2, 3]),
+			validation_code_hash: ValidationCode(vec![4, 5, 6]).hash(),
+			upgrade_restriction: None,
+			future_validation_code: None,
+		}
+	}
+
+	#[test]
+	fn constraints_disallowed_trunk_watermark() {
+		let constraints = make_constraints();
+		let mut modifications = ConstraintModifications::identity();
+		modifications.hrmp_watermark = Some(HrmpWatermarkUpdate::Trunk(7));
+
+		assert_eq!(
+			constraints.check_modifications(&modifications),
+			Err(ModificationError::DisallowedHrmpWatermark(7)),
+		);
+
+		assert_eq!(
+			constraints.apply_modifications(&modifications),
+			Err(ModificationError::DisallowedHrmpWatermark(7)),
+		);
+	}
+
+	#[test]
+	fn constraints_always_allow_head_watermark() {
+		let constraints = make_constraints();
+		let mut modifications = ConstraintModifications::identity();
+		modifications.hrmp_watermark = Some(HrmpWatermarkUpdate::Head(7));
+
+		assert!(constraints.check_modifications(&modifications).is_ok());
+
+		let new_constraints = constraints.apply_modifications(&modifications).unwrap();
+		assert_eq!(new_constraints.hrmp_inbound.valid_watermarks, vec![8]);
+	}
+
+	#[test]
+	fn constraints_no_such_hrmp_channel() {
+		let constraints = make_constraints();
+		let mut modifications = ConstraintModifications::identity();
+		let bad_para = ParaId::from(100u32);
+		modifications.outbound_hrmp.insert(
+			bad_para,
+			OutboundHrmpChannelModification { bytes_submitted: 0, messages_submitted: 0 },
+		);
+
+		assert_eq!(
+			constraints.check_modifications(&modifications),
+			Err(ModificationError::NoSuchHrmpChannel(bad_para)),
+		);
+
+		assert_eq!(
+			constraints.apply_modifications(&modifications),
+			Err(ModificationError::NoSuchHrmpChannel(bad_para)),
+		);
+	}
+
+	#[test]
+	fn constraints_hrmp_messages_overflow() {
+		let constraints = make_constraints();
+		let mut modifications = ConstraintModifications::identity();
+		let para_a = ParaId::from(1u32);
+		modifications.outbound_hrmp.insert(
+			para_a,
+			OutboundHrmpChannelModification { bytes_submitted: 0, messages_submitted: 6 },
+		);
+
+		assert_eq!(
+			constraints.check_modifications(&modifications),
+			Err(ModificationError::HrmpMessagesOverflow {
+				para_id: para_a,
+				messages_remaining: 5,
+				messages_submitted: 6,
+			}),
+		);
+
+		assert_eq!(
+			constraints.apply_modifications(&modifications),
+			Err(ModificationError::HrmpMessagesOverflow {
+				para_id: para_a,
+				messages_remaining: 5,
+				messages_submitted: 6,
+			}),
+		);
+	}
+
+	#[test]
+	fn constraints_hrmp_bytes_overflow() {
+		let constraints = make_constraints();
+		let mut modifications = ConstraintModifications::identity();
+		let para_a = ParaId::from(1u32);
+		modifications.outbound_hrmp.insert(
+			para_a,
+			OutboundHrmpChannelModification { bytes_submitted: 513, messages_submitted: 1 },
+		);
+
+		assert_eq!(
+			constraints.check_modifications(&modifications),
+			Err(ModificationError::HrmpBytesOverflow {
+				para_id: para_a,
+				bytes_remaining: 512,
+				bytes_submitted: 513,
+			}),
+		);
+
+		assert_eq!(
+			constraints.apply_modifications(&modifications),
+			Err(ModificationError::HrmpBytesOverflow {
+				para_id: para_a,
+				bytes_remaining: 512,
+				bytes_submitted: 513,
+			}),
+		);
+	}
+
+	#[test]
+	fn constraints_ump_messages_overflow() {
+		let constraints = make_constraints();
+		let mut modifications = ConstraintModifications::identity();
+		modifications.ump_messages_sent = 11;
+
+		assert_eq!(
+			constraints.check_modifications(&modifications),
+			Err(ModificationError::UmpMessagesOverflow {
+				messages_remaining: 10,
+				messages_submitted: 11,
+			}),
+		);
+
+		assert_eq!(
+			constraints.apply_modifications(&modifications),
+			Err(ModificationError::UmpMessagesOverflow {
+				messages_remaining: 10,
+				messages_submitted: 11,
+			}),
+		);
+	}
+
+	#[test]
+	fn constraints_ump_bytes_overflow() {
+		let constraints = make_constraints();
+		let mut modifications = ConstraintModifications::identity();
+		modifications.ump_bytes_sent = 1025;
+
+		assert_eq!(
+			constraints.check_modifications(&modifications),
+			Err(ModificationError::UmpBytesOverflow {
+				bytes_remaining: 1024,
+				bytes_submitted: 1025,
+			}),
+		);
+
+		assert_eq!(
+			constraints.apply_modifications(&modifications),
+			Err(ModificationError::UmpBytesOverflow {
+				bytes_remaining: 1024,
+				bytes_submitted: 1025,
+			}),
+		);
+	}
+
+	#[test]
+	fn constraints_dmp_messages() {
+		let mut constraints = make_constraints();
+		let mut modifications = ConstraintModifications::identity();
+		assert!(constraints.check_modifications(&modifications).is_ok());
+		assert!(constraints.apply_modifications(&modifications).is_ok());
+
+		modifications.dmp_messages_processed = 6;
+
+		assert_eq!(
+			constraints.check_modifications(&modifications),
+			Err(ModificationError::DmpMessagesUnderflow {
+				messages_remaining: 0,
+				messages_processed: 6,
+			}),
+		);
+
+		assert_eq!(
+			constraints.apply_modifications(&modifications),
+			Err(ModificationError::DmpMessagesUnderflow {
+				messages_remaining: 0,
+				messages_processed: 6,
+			}),
+		);
+
+		constraints.dmp_remaining_messages = vec![1, 4, 8, 10];
+		modifications.dmp_messages_processed = 2;
+		assert!(constraints.check_modifications(&modifications).is_ok());
+		let constraints = constraints
+			.apply_modifications(&modifications)
+			.expect("modifications are valid");
+
+		assert_eq!(&constraints.dmp_remaining_messages, &[8, 10]);
+	}
+
+	#[test]
+	fn constraints_nonexistent_code_upgrade() {
+		let constraints = make_constraints();
+		let mut modifications = ConstraintModifications::identity();
+		modifications.code_upgrade_applied = true;
+
+		assert_eq!(
+			constraints.check_modifications(&modifications),
+			Err(ModificationError::AppliedNonexistentCodeUpgrade),
+		);
+
+		assert_eq!(
+			constraints.apply_modifications(&modifications),
+			Err(ModificationError::AppliedNonexistentCodeUpgrade),
+		);
+	}
+
+	fn make_candidate(
+		constraints: &Constraints,
+		relay_parent: &RelayChainBlockInfo,
+	) -> ProspectiveCandidate<'static> {
+		let collator_pair = CollatorPair::generate().0;
+		let collator = collator_pair.public();
+
+		let sig = collator_pair.sign(b"blabla".as_slice());
+
+		ProspectiveCandidate {
+			commitments: Cow::Owned(CandidateCommitments {
+				upward_messages: Default::default(),
+				horizontal_messages: Default::default(),
+				new_validation_code: None,
+				head_data: HeadData::from(vec![1, 2, 3, 4, 5]),
+				processed_downward_messages: 0,
+				hrmp_watermark: relay_parent.number,
+			}),
+			collator,
+			collator_signature: sig,
+			persisted_validation_data: PersistedValidationData {
+				parent_head: constraints.required_parent.clone(),
+				relay_parent_number: relay_parent.number,
+				relay_parent_storage_root: relay_parent.storage_root,
+				max_pov_size: constraints.max_pov_size as u32,
+			},
+			pov_hash: Hash::repeat_byte(1),
+			validation_code_hash: constraints.validation_code_hash,
+		}
+	}
+
+	#[test]
+	fn fragment_validation_code_mismatch() {
+		let relay_parent = RelayChainBlockInfo {
+			number: 6,
+			hash: Hash::repeat_byte(0x0a),
+			storage_root: Hash::repeat_byte(0xff),
+		};
+
+		let constraints = make_constraints();
+		let mut candidate = make_candidate(&constraints, &relay_parent);
+
+		let expected_code = constraints.validation_code_hash;
+		let got_code = ValidationCode(vec![9, 9, 9]).hash();
+
+		candidate.validation_code_hash = got_code;
+
+		assert_eq!(
+			Fragment::new(relay_parent, constraints, candidate),
+			Err(FragmentValidityError::ValidationCodeMismatch(expected_code, got_code,)),
+		)
+	}
+
+	#[test]
+	fn fragment_pvd_mismatch() {
+		let relay_parent = RelayChainBlockInfo {
+			number: 6,
+			hash: Hash::repeat_byte(0x0a),
+			storage_root: Hash::repeat_byte(0xff),
+		};
+
+		let relay_parent_b = RelayChainBlockInfo {
+			number: 6,
+			hash: Hash::repeat_byte(0x0b),
+			storage_root: Hash::repeat_byte(0xee),
+		};
+
+		let constraints = make_constraints();
+		let candidate = make_candidate(&constraints, &relay_parent);
+
+		let expected_pvd = PersistedValidationData {
+			parent_head: constraints.required_parent.clone(),
+			relay_parent_number: relay_parent_b.number,
+			relay_parent_storage_root: relay_parent_b.storage_root,
+			max_pov_size: constraints.max_pov_size as u32,
+		};
+
+		let got_pvd = candidate.persisted_validation_data.clone();
+
+		assert_eq!(
+			Fragment::new(relay_parent_b, constraints, candidate),
+			Err(FragmentValidityError::PersistedValidationDataMismatch(expected_pvd, got_pvd,)),
+		);
+	}
+
+	#[test]
+	fn fragment_code_size_too_large() {
+		let relay_parent = RelayChainBlockInfo {
+			number: 6,
+			hash: Hash::repeat_byte(0x0a),
+			storage_root: Hash::repeat_byte(0xff),
+		};
+
+		let constraints = make_constraints();
+		let mut candidate = make_candidate(&constraints, &relay_parent);
+
+		let max_code_size = constraints.max_code_size;
+		candidate.commitments_mut().new_validation_code = Some(vec![0; max_code_size + 1].into());
+
+		assert_eq!(
+			Fragment::new(relay_parent, constraints, candidate),
+			Err(FragmentValidityError::CodeSizeTooLarge(max_code_size, max_code_size + 1,)),
+		);
+	}
+
+	#[test]
+	fn fragment_relay_parent_too_old() {
+		let relay_parent = RelayChainBlockInfo {
+			number: 3,
+			hash: Hash::repeat_byte(0x0a),
+			storage_root: Hash::repeat_byte(0xff),
+		};
+
+		let constraints = make_constraints();
+		let candidate = make_candidate(&constraints, &relay_parent);
+
+		assert_eq!(
+			Fragment::new(relay_parent, constraints, candidate),
+			Err(FragmentValidityError::RelayParentTooOld(5, 3,)),
+		);
+	}
+
+	#[test]
+	fn fragment_hrmp_messages_overflow() {
+		let relay_parent = RelayChainBlockInfo {
+			number: 6,
+			hash: Hash::repeat_byte(0x0a),
+			storage_root: Hash::repeat_byte(0xff),
+		};
+
+		let constraints = make_constraints();
+		let mut candidate = make_candidate(&constraints, &relay_parent);
+
+		let max_hrmp = constraints.max_hrmp_num_per_candidate;
+
+		candidate
+			.commitments_mut()
+			.horizontal_messages
+			.try_extend((0..max_hrmp + 1).map(|i| OutboundHrmpMessage {
+				recipient: ParaId::from(i as u32),
+				data: vec![1, 2, 3],
+			}))
+			.unwrap();
+
+		assert_eq!(
+			Fragment::new(relay_parent, constraints, candidate),
+			Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow {
+				messages_allowed: max_hrmp,
+				messages_submitted: max_hrmp + 1,
+			}),
+		);
+	}
+
+	#[test]
+	fn fragment_dmp_advancement_rule() {
+		let relay_parent = RelayChainBlockInfo {
+			number: 6,
+			hash: Hash::repeat_byte(0x0a),
+			storage_root: Hash::repeat_byte(0xff),
+		};
+
+		let mut constraints = make_constraints();
+		let mut candidate = make_candidate(&constraints, &relay_parent);
+
+		// Empty dmp queue is ok.
+		assert!(Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()).is_ok());
+		// Unprocessed message that was sent later is ok.
+		constraints.dmp_remaining_messages = vec![relay_parent.number + 1];
+		assert!(Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()).is_ok());
+
+		for block_number in 0..=relay_parent.number {
+			constraints.dmp_remaining_messages = vec![block_number];
+
+			assert_eq!(
+				Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()),
+				Err(FragmentValidityError::DmpAdvancementRule),
+			);
+		}
+
+		candidate.commitments.to_mut().processed_downward_messages = 1;
+		assert!(Fragment::new(relay_parent, constraints, candidate).is_ok());
+	}
+
+	#[test]
+	fn fragment_ump_messages_overflow() {
+		let relay_parent = RelayChainBlockInfo {
+			number: 6,
+			hash: Hash::repeat_byte(0x0a),
+			storage_root: Hash::repeat_byte(0xff),
+		};
+
+		let constraints = make_constraints();
+		let mut candidate = make_candidate(&constraints, &relay_parent);
+
+		let max_ump = constraints.max_ump_num_per_candidate;
+
+		candidate
+			.commitments
+			.to_mut()
+			.upward_messages
+			.try_extend((0..max_ump + 1).map(|i| vec![i as u8]))
+			.unwrap();
+
+		assert_eq!(
+			Fragment::new(relay_parent, constraints, candidate),
+			Err(FragmentValidityError::UmpMessagesPerCandidateOverflow {
+				messages_allowed: max_ump,
+				messages_submitted: max_ump + 1,
+			}),
+		);
+	}
+
+	#[test]
+	fn fragment_code_upgrade_restricted() {
+		let relay_parent = RelayChainBlockInfo {
+			number: 6,
+			hash: Hash::repeat_byte(0x0a),
+			storage_root: Hash::repeat_byte(0xff),
+		};
+
+		let mut constraints = make_constraints();
+		let mut candidate = make_candidate(&constraints, &relay_parent);
+
+		constraints.upgrade_restriction = Some(UpgradeRestriction::Present);
+		candidate.commitments_mut().new_validation_code = Some(ValidationCode(vec![1, 2, 3]));
+
+		assert_eq!(
+			Fragment::new(relay_parent, constraints, candidate),
+			Err(FragmentValidityError::CodeUpgradeRestricted),
+		);
+	}
+
+	#[test]
+	fn fragment_hrmp_messages_descending_or_duplicate() {
+		let relay_parent = RelayChainBlockInfo {
+			number: 6,
+			hash: Hash::repeat_byte(0x0a),
+			storage_root: Hash::repeat_byte(0xff),
+		};
+
+		let constraints = make_constraints();
+		let mut candidate = make_candidate(&constraints, &relay_parent);
+
+		candidate.commitments_mut().horizontal_messages = HorizontalMessages::truncate_from(vec![
+			OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![1, 2, 3] },
+			OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] },
+		]);
+
+		assert_eq!(
+			Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()),
+			Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)),
+		);
+
+		candidate.commitments_mut().horizontal_messages = HorizontalMessages::truncate_from(vec![
+			OutboundHrmpMessage { recipient: ParaId::from(1 as u32), data: vec![1, 2, 3] },
+			OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] },
+		]);
+
+		assert_eq!(
+			Fragment::new(relay_parent, constraints, candidate),
+			Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)),
+		);
+	}
+}
diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/staging.rs b/polkadot/node/subsystem-util/src/inclusion_emulator/staging.rs
deleted file mode 100644
index eb063229752..00000000000
--- a/polkadot/node/subsystem-util/src/inclusion_emulator/staging.rs
+++ /dev/null
@@ -1,1450 +0,0 @@
-// Copyright (C) Parity Technologies (UK) Ltd.
-// This file is part of Polkadot.
-
-// Polkadot is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Polkadot is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-
-//! The implementation of the inclusion emulator for the 'staging' runtime version.
-//!
-//! # Overview
-//!
-//! A set of utilities for node-side code to emulate the logic the runtime uses for checking
-//! parachain blocks in order to build prospective parachains that are produced ahead of the
-//! relay chain. These utilities allow the node-side to predict, with high accuracy, what
-//! the relay-chain will accept in the near future.
-//!
-//! This module has 2 key data types: [`Constraints`] and [`Fragment`]s. [`Constraints`]
-//! exhaustively define the set of valid inputs and outputs to parachain execution. A [`Fragment`]
-//! indicates a parachain block, anchored to the relay-chain at a particular relay-chain block,
-//! known as the relay-parent.
-//!
-//! ## Fragment Validity
-//!
-//! Every relay-parent is implicitly associated with a unique set of [`Constraints`] that describe
-//! the properties that must be true for a block to be included in a direct child of that block,
-//! assuming there is no intermediate parachain block pending availability.
-//!
-//! However, the key factor that makes asynchronously-grown prospective chains
-//! possible is the fact that the relay-chain accepts candidate blocks based on whether they
-//! are valid under the constraints of the present moment, not based on whether they were
-//! valid at the time of construction.
-//!
-//! As such, [`Fragment`]s are often, but not always constructed in such a way that they are
-//! invalid at first and become valid later on, as the relay chain grows.
-//!
-//! # Usage
-//!
-//! It's expected that the users of this module will be building up trees of
-//! [`Fragment`]s and consistently pruning and adding to the tree.
-//!
-//! ## Operating Constraints
-//!
-//! The *operating constraints* of a `Fragment` are the constraints with which that fragment
-//! was intended to comply. The operating constraints are defined as the base constraints
-//! of the relay-parent of the fragment modified by the cumulative modifications of all
-//! fragments between the relay-parent and the current fragment.
-//!
-//! What the operating constraints are, in practice, is a prediction about the state of the
-//! relay-chain in the future. The relay-chain is aware of some current state, and we want to
-//! make an intelligent prediction about what might be accepted in the future based on
-//! prior fragments that also exist off-chain.
-//!
-//! ## Fragment Trees
-//!
-//! As the relay-chain grows, some predictions come true and others come false.
-//! And new predictions get made. These three changes correspond distinctly to the
-//! 3 primary operations on fragment trees.
-//!
-//! A fragment tree is a mental model for thinking about a forking series of predictions
-//! about a single parachain. There may be one or more fragment trees per parachain.
-//!
-//! In expectation, most parachains will have a plausibly-unique authorship method which means that
-//! they should really be much closer to fragment-chains, maybe with an occasional fork.
-//!
-//! Avoiding fragment-tree blowup is beyond the scope of this module.
-//!
-//! ### Pruning Fragment Trees
-//!
-//! When the relay-chain advances, we want to compare the new constraints of that relay-parent to
-//! the roots of the fragment trees we have. There are 3 cases:
-//!
-//! 1. The root fragment is still valid under the new constraints. In this case, we do nothing. This
-//! is the "prediction still uncertain" case.
-//!
-//! 2. The root fragment is invalid under the new constraints because it has been subsumed by the
-//! relay-chain. In this case, we can discard the root and split & re-root the fragment tree under
-//! its descendents and compare to the new constraints again. This is the "prediction came true"
-//! case.
-//!
-//! 3. The root fragment is invalid under the new constraints because a competing parachain block
-//! has been included or it would never be accepted for some other reason. In this case we can
-//! discard the entire fragment tree. This is the "prediction came false" case.
-//!
-//! This is all a bit of a simplification because it assumes that the relay-chain advances without
-//! forks and is finalized instantly. In practice, the set of fragment-trees needs to be observable
-//! from the perspective of a few different possible forks of the relay-chain and not pruned
-//! too eagerly.
-//!
-//! Note that the fragments themselves don't need to change and the only thing we care about
-//! is whether the predictions they represent are still valid.
-//!
-//! ### Extending Fragment Trees
-//!
-//! As predictions fade into the past, new ones should be stacked on top.
-//!
-//! Every new relay-chain block is an opportunity to make a new prediction about the future.
-//! Higher-level logic should select the leaves of the fragment-trees to build upon or whether
-//! to create a new fragment-tree.
-//!
-//! ### Code Upgrades
-//!
-//! Code upgrades are the main place where this emulation fails. The on-chain PVF upgrade scheduling
-//! logic is very path-dependent and intricate so we just assume that code upgrades
-//! can't be initiated and applied within a single fragment-tree. Fragment-trees aren't deep,
-//! in practice and code upgrades are fairly rare. So what's likely to happen around code
-//! upgrades is that the entire fragment-tree has to get discarded at some point.
-//!
-//! That means a few blocks of execution time lost, which is not a big deal for code upgrades
-//! in practice at most once every few weeks.
-
-use polkadot_primitives::vstaging::{
-	BlockNumber, CandidateCommitments, CollatorId, CollatorSignature,
-	Constraints as PrimitiveConstraints, Hash, HeadData, Id as ParaId, PersistedValidationData,
-	UpgradeRestriction, ValidationCodeHash,
-};
-use std::{
-	borrow::{Borrow, Cow},
-	collections::HashMap,
-};
-
-/// Constraints on inbound HRMP channels.
-#[derive(Debug, Clone, PartialEq)]
-pub struct InboundHrmpLimitations {
-	/// An exhaustive set of all valid watermarks, sorted ascending
-	pub valid_watermarks: Vec<BlockNumber>,
-}
-
-/// Constraints on outbound HRMP channels.
-#[derive(Debug, Clone, PartialEq)]
-pub struct OutboundHrmpChannelLimitations {
-	/// The maximum bytes that can be written to the channel.
-	pub bytes_remaining: usize,
-	/// The maximum messages that can be written to the channel.
-	pub messages_remaining: usize,
-}
-
-/// Constraints on the actions that can be taken by a new parachain
-/// block. These limitations are implicitly associated with some particular
-/// parachain, which should be apparent from usage.
-#[derive(Debug, Clone, PartialEq)]
-pub struct Constraints {
-	/// The minimum relay-parent number accepted under these constraints.
-	pub min_relay_parent_number: BlockNumber,
-	/// The maximum Proof-of-Validity size allowed, in bytes.
-	pub max_pov_size: usize,
-	/// The maximum new validation code size allowed, in bytes.
-	pub max_code_size: usize,
-	/// The amount of UMP messages remaining.
-	pub ump_remaining: usize,
-	/// The amount of UMP bytes remaining.
-	pub ump_remaining_bytes: usize,
-	/// The maximum number of UMP messages allowed per candidate.
-	pub max_ump_num_per_candidate: usize,
-	/// Remaining DMP queue. Only includes sent-at block numbers.
-	pub dmp_remaining_messages: Vec<BlockNumber>,
-	/// The limitations of all registered inbound HRMP channels.
-	pub hrmp_inbound: InboundHrmpLimitations,
-	/// The limitations of all registered outbound HRMP channels.
-	pub hrmp_channels_out: HashMap<ParaId, OutboundHrmpChannelLimitations>,
-	/// The maximum number of HRMP messages allowed per candidate.
-	pub max_hrmp_num_per_candidate: usize,
-	/// The required parent head-data of the parachain.
-	pub required_parent: HeadData,
-	/// The expected validation-code-hash of this parachain.
-	pub validation_code_hash: ValidationCodeHash,
-	/// The code upgrade restriction signal as-of this parachain.
-	pub upgrade_restriction: Option<UpgradeRestriction>,
-	/// The future validation code hash, if any, and at what relay-parent
-	/// number the upgrade would be minimally applied.
-	pub future_validation_code: Option<(BlockNumber, ValidationCodeHash)>,
-}
-
-impl From<PrimitiveConstraints> for Constraints {
-	fn from(c: PrimitiveConstraints) -> Self {
-		Constraints {
-			min_relay_parent_number: c.min_relay_parent_number,
-			max_pov_size: c.max_pov_size as _,
-			max_code_size: c.max_code_size as _,
-			ump_remaining: c.ump_remaining as _,
-			ump_remaining_bytes: c.ump_remaining_bytes as _,
-			max_ump_num_per_candidate: c.max_ump_num_per_candidate as _,
-			dmp_remaining_messages: c.dmp_remaining_messages,
-			hrmp_inbound: InboundHrmpLimitations {
-				valid_watermarks: c.hrmp_inbound.valid_watermarks,
-			},
-			hrmp_channels_out: c
-				.hrmp_channels_out
-				.into_iter()
-				.map(|(para_id, limits)| {
-					(
-						para_id,
-						OutboundHrmpChannelLimitations {
-							bytes_remaining: limits.bytes_remaining as _,
-							messages_remaining: limits.messages_remaining as _,
-						},
-					)
-				})
-				.collect(),
-			max_hrmp_num_per_candidate: c.max_hrmp_num_per_candidate as _,
-			required_parent: c.required_parent,
-			validation_code_hash: c.validation_code_hash,
-			upgrade_restriction: c.upgrade_restriction,
-			future_validation_code: c.future_validation_code,
-		}
-	}
-}
-
-/// Kinds of errors that can occur when modifying constraints.
-#[derive(Debug, Clone, PartialEq)]
-pub enum ModificationError {
-	/// The HRMP watermark is not allowed.
-	DisallowedHrmpWatermark(BlockNumber),
-	/// No such HRMP outbound channel.
-	NoSuchHrmpChannel(ParaId),
-	/// Too many messages submitted to HRMP channel.
-	HrmpMessagesOverflow {
-		/// The ID of the recipient.
-		para_id: ParaId,
-		/// The amount of remaining messages in the capacity of the channel.
-		messages_remaining: usize,
-		/// The amount of messages submitted to the channel.
-		messages_submitted: usize,
-	},
-	/// Too many bytes submitted to HRMP channel.
-	HrmpBytesOverflow {
-		/// The ID of the recipient.
-		para_id: ParaId,
-		/// The amount of remaining bytes in the capacity of the channel.
-		bytes_remaining: usize,
-		/// The amount of bytes submitted to the channel.
-		bytes_submitted: usize,
-	},
-	/// Too many messages submitted to UMP.
-	UmpMessagesOverflow {
-		/// The amount of remaining messages in the capacity of UMP.
-		messages_remaining: usize,
-		/// The amount of messages submitted to UMP.
-		messages_submitted: usize,
-	},
-	/// Too many bytes submitted to UMP.
-	UmpBytesOverflow {
-		/// The amount of remaining bytes in the capacity of UMP.
-		bytes_remaining: usize,
-		/// The amount of bytes submitted to UMP.
-		bytes_submitted: usize,
-	},
-	/// Too many messages processed from DMP.
-	DmpMessagesUnderflow {
-		/// The amount of messages waiting to be processed from DMP.
-		messages_remaining: usize,
-		/// The amount of messages processed.
-		messages_processed: usize,
-	},
-	/// No validation code upgrade to apply.
-	AppliedNonexistentCodeUpgrade,
-}
-
-impl Constraints {
-	/// Check modifications against constraints.
-	pub fn check_modifications(
-		&self,
-		modifications: &ConstraintModifications,
-	) -> Result<(), ModificationError> {
-		if let Some(HrmpWatermarkUpdate::Trunk(hrmp_watermark)) = modifications.hrmp_watermark {
-			// head updates are always valid.
-			if self.hrmp_inbound.valid_watermarks.iter().all(|w| w != &hrmp_watermark) {
-				return Err(ModificationError::DisallowedHrmpWatermark(hrmp_watermark))
-			}
-		}
-
-		for (id, outbound_hrmp_mod) in &modifications.outbound_hrmp {
-			if let Some(outbound) = self.hrmp_channels_out.get(&id) {
-				outbound.bytes_remaining.checked_sub(outbound_hrmp_mod.bytes_submitted).ok_or(
-					ModificationError::HrmpBytesOverflow {
-						para_id: *id,
-						bytes_remaining: outbound.bytes_remaining,
-						bytes_submitted: outbound_hrmp_mod.bytes_submitted,
-					},
-				)?;
-
-				outbound
-					.messages_remaining
-					.checked_sub(outbound_hrmp_mod.messages_submitted)
-					.ok_or(ModificationError::HrmpMessagesOverflow {
-						para_id: *id,
-						messages_remaining: outbound.messages_remaining,
-						messages_submitted: outbound_hrmp_mod.messages_submitted,
-					})?;
-			} else {
-				return Err(ModificationError::NoSuchHrmpChannel(*id))
-			}
-		}
-
-		self.ump_remaining.checked_sub(modifications.ump_messages_sent).ok_or(
-			ModificationError::UmpMessagesOverflow {
-				messages_remaining: self.ump_remaining,
-				messages_submitted: modifications.ump_messages_sent,
-			},
-		)?;
-
-		self.ump_remaining_bytes.checked_sub(modifications.ump_bytes_sent).ok_or(
-			ModificationError::UmpBytesOverflow {
-				bytes_remaining: self.ump_remaining_bytes,
-				bytes_submitted: modifications.ump_bytes_sent,
-			},
-		)?;
-
-		self.dmp_remaining_messages
-			.len()
-			.checked_sub(modifications.dmp_messages_processed)
-			.ok_or(ModificationError::DmpMessagesUnderflow {
-				messages_remaining: self.dmp_remaining_messages.len(),
-				messages_processed: modifications.dmp_messages_processed,
-			})?;
-
-		if self.future_validation_code.is_none() && modifications.code_upgrade_applied {
-			return Err(ModificationError::AppliedNonexistentCodeUpgrade)
-		}
-
-		Ok(())
-	}
-
-	/// Apply modifications to these constraints. If this succeeds, it passes
-	/// all sanity-checks.
-	pub fn apply_modifications(
-		&self,
-		modifications: &ConstraintModifications,
-	) -> Result<Self, ModificationError> {
-		let mut new = self.clone();
-
-		if let Some(required_parent) = modifications.required_parent.as_ref() {
-			new.required_parent = required_parent.clone();
-		}
-
-		if let Some(ref hrmp_watermark) = modifications.hrmp_watermark {
-			match new.hrmp_inbound.valid_watermarks.binary_search(&hrmp_watermark.watermark()) {
-				Ok(pos) => {
-					// Exact match, so this is OK in all cases.
-					let _ = new.hrmp_inbound.valid_watermarks.drain(..pos + 1);
-				},
-				Err(pos) => match hrmp_watermark {
-					HrmpWatermarkUpdate::Head(_) => {
-						// Updates to Head are always OK.
-						let _ = new.hrmp_inbound.valid_watermarks.drain(..pos);
-					},
-					HrmpWatermarkUpdate::Trunk(n) => {
-						// Trunk update landing on disallowed watermark is not OK.
-						return Err(ModificationError::DisallowedHrmpWatermark(*n))
-					},
-				},
-			}
-		}
-
-		for (id, outbound_hrmp_mod) in &modifications.outbound_hrmp {
-			if let Some(outbound) = new.hrmp_channels_out.get_mut(&id) {
-				outbound.bytes_remaining = outbound
-					.bytes_remaining
-					.checked_sub(outbound_hrmp_mod.bytes_submitted)
-					.ok_or(ModificationError::HrmpBytesOverflow {
-						para_id: *id,
-						bytes_remaining: outbound.bytes_remaining,
-						bytes_submitted: outbound_hrmp_mod.bytes_submitted,
-					})?;
-
-				outbound.messages_remaining = outbound
-					.messages_remaining
-					.checked_sub(outbound_hrmp_mod.messages_submitted)
-					.ok_or(ModificationError::HrmpMessagesOverflow {
-						para_id: *id,
-						messages_remaining: outbound.messages_remaining,
-						messages_submitted: outbound_hrmp_mod.messages_submitted,
-					})?;
-			} else {
-				return Err(ModificationError::NoSuchHrmpChannel(*id))
-			}
-		}
-
-		new.ump_remaining = new.ump_remaining.checked_sub(modifications.ump_messages_sent).ok_or(
-			ModificationError::UmpMessagesOverflow {
-				messages_remaining: new.ump_remaining,
-				messages_submitted: modifications.ump_messages_sent,
-			},
-		)?;
-
-		new.ump_remaining_bytes = new
-			.ump_remaining_bytes
-			.checked_sub(modifications.ump_bytes_sent)
-			.ok_or(ModificationError::UmpBytesOverflow {
-				bytes_remaining: new.ump_remaining_bytes,
-				bytes_submitted: modifications.ump_bytes_sent,
-			})?;
-
-		if modifications.dmp_messages_processed > new.dmp_remaining_messages.len() {
-			return Err(ModificationError::DmpMessagesUnderflow {
-				messages_remaining: new.dmp_remaining_messages.len(),
-				messages_processed: modifications.dmp_messages_processed,
-			})
-		} else {
-			new.dmp_remaining_messages =
-				new.dmp_remaining_messages[modifications.dmp_messages_processed..].to_vec();
-		}
-
-		if modifications.code_upgrade_applied {
-			new.validation_code_hash = new
-				.future_validation_code
-				.take()
-				.ok_or(ModificationError::AppliedNonexistentCodeUpgrade)?
-				.1;
-		}
-
-		Ok(new)
-	}
-}
-
-/// Information about a relay-chain block.
-#[derive(Debug, Clone, PartialEq)]
-pub struct RelayChainBlockInfo {
-	/// The hash of the relay-chain block.
-	pub hash: Hash,
-	/// The number of the relay-chain block.
-	pub number: BlockNumber,
-	/// The storage-root of the relay-chain block.
-	pub storage_root: Hash,
-}
-
-/// An update to outbound HRMP channels.
-#[derive(Debug, Clone, PartialEq, Default)]
-pub struct OutboundHrmpChannelModification {
-	/// The number of bytes submitted to the channel.
-	pub bytes_submitted: usize,
-	/// The number of messages submitted to the channel.
-	pub messages_submitted: usize,
-}
-
-/// An update to the HRMP Watermark.
-#[derive(Debug, Clone, PartialEq)]
-pub enum HrmpWatermarkUpdate {
-	/// This is an update placing the watermark at the head of the chain,
-	/// which is always legal.
-	Head(BlockNumber),
-	/// This is an update placing the watermark behind the head of the
-	/// chain, which is only legal if it lands on a block where messages
-	/// were queued.
-	Trunk(BlockNumber),
-}
-
-impl HrmpWatermarkUpdate {
-	fn watermark(&self) -> BlockNumber {
-		match *self {
-			HrmpWatermarkUpdate::Head(n) | HrmpWatermarkUpdate::Trunk(n) => n,
-		}
-	}
-}
-
-/// Modifications to constraints as a result of prospective candidates.
-#[derive(Debug, Clone, PartialEq)]
-pub struct ConstraintModifications {
-	/// The required parent head to build upon.
-	pub required_parent: Option<HeadData>,
-	/// The new HRMP watermark
-	pub hrmp_watermark: Option<HrmpWatermarkUpdate>,
-	/// Outbound HRMP channel modifications.
-	pub outbound_hrmp: HashMap<ParaId, OutboundHrmpChannelModification>,
-	/// The amount of UMP messages sent.
-	pub ump_messages_sent: usize,
-	/// The amount of UMP bytes sent.
-	pub ump_bytes_sent: usize,
-	/// The amount of DMP messages processed.
-	pub dmp_messages_processed: usize,
-	/// Whether a pending code upgrade has been applied.
-	pub code_upgrade_applied: bool,
-}
-
-impl ConstraintModifications {
-	/// The 'identity' modifications: these can be applied to
-	/// any constraints and yield the exact same result.
-	pub fn identity() -> Self {
-		ConstraintModifications {
-			required_parent: None,
-			hrmp_watermark: None,
-			outbound_hrmp: HashMap::new(),
-			ump_messages_sent: 0,
-			ump_bytes_sent: 0,
-			dmp_messages_processed: 0,
-			code_upgrade_applied: false,
-		}
-	}
-
-	/// Stack other modifications on top of these.
-	///
-	/// This does no sanity-checking, so if `other` is garbage relative
-	/// to `self`, then the new value will be garbage as well.
-	///
-	/// This is an addition which is not commutative.
-	pub fn stack(&mut self, other: &Self) {
-		if let Some(ref new_parent) = other.required_parent {
-			self.required_parent = Some(new_parent.clone());
-		}
-		if let Some(ref new_hrmp_watermark) = other.hrmp_watermark {
-			self.hrmp_watermark = Some(new_hrmp_watermark.clone());
-		}
-
-		for (id, mods) in &other.outbound_hrmp {
-			let record = self.outbound_hrmp.entry(*id).or_default();
-			record.messages_submitted += mods.messages_submitted;
-			record.bytes_submitted += mods.bytes_submitted;
-		}
-
-		self.ump_messages_sent += other.ump_messages_sent;
-		self.ump_bytes_sent += other.ump_bytes_sent;
-		self.dmp_messages_processed += other.dmp_messages_processed;
-		self.code_upgrade_applied |= other.code_upgrade_applied;
-	}
-}
-
-/// The prospective candidate.
-///
-/// This comprises the key information that represent a candidate
-/// without pinning it to a particular session. For example, everything
-/// to do with the collator's signature and commitments are represented
-/// here. But the erasure-root is not. This means that prospective candidates
-/// are not correlated to any session in particular.
-#[derive(Debug, Clone, PartialEq)]
-pub struct ProspectiveCandidate<'a> {
-	/// The commitments to the output of the execution.
-	pub commitments: Cow<'a, CandidateCommitments>,
-	/// The collator that created the candidate.
-	pub collator: CollatorId,
-	/// The signature of the collator on the payload.
-	pub collator_signature: CollatorSignature,
-	/// The persisted validation data used to create the candidate.
-	pub persisted_validation_data: PersistedValidationData,
-	/// The hash of the PoV.
-	pub pov_hash: Hash,
-	/// The validation code hash used by the candidate.
-	pub validation_code_hash: ValidationCodeHash,
-}
-
-impl<'a> ProspectiveCandidate<'a> {
-	fn into_owned(self) -> ProspectiveCandidate<'static> {
-		ProspectiveCandidate { commitments: Cow::Owned(self.commitments.into_owned()), ..self }
-	}
-
-	/// Partially clone the prospective candidate, but borrow the
-	/// parts which are potentially heavy.
-	pub fn partial_clone(&self) -> ProspectiveCandidate {
-		ProspectiveCandidate {
-			commitments: Cow::Borrowed(self.commitments.borrow()),
-			collator: self.collator.clone(),
-			collator_signature: self.collator_signature.clone(),
-			persisted_validation_data: self.persisted_validation_data.clone(),
-			pov_hash: self.pov_hash,
-			validation_code_hash: self.validation_code_hash,
-		}
-	}
-}
-
-#[cfg(test)]
-impl ProspectiveCandidate<'static> {
-	fn commitments_mut(&mut self) -> &mut CandidateCommitments {
-		self.commitments.to_mut()
-	}
-}
-
-/// Kinds of errors with the validity of a fragment.
-#[derive(Debug, Clone, PartialEq)]
-pub enum FragmentValidityError {
-	/// The validation code of the candidate doesn't match the
-	/// operating constraints.
-	///
-	/// Expected, Got
-	ValidationCodeMismatch(ValidationCodeHash, ValidationCodeHash),
-	/// The persisted-validation-data doesn't match.
-	///
-	/// Expected, Got
-	PersistedValidationDataMismatch(PersistedValidationData, PersistedValidationData),
-	/// The outputs of the candidate are invalid under the operating
-	/// constraints.
-	OutputsInvalid(ModificationError),
-	/// New validation code size too big.
-	///
-	/// Max allowed, new.
-	CodeSizeTooLarge(usize, usize),
-	/// Relay parent too old.
-	///
-	/// Min allowed, current.
-	RelayParentTooOld(BlockNumber, BlockNumber),
-	/// Para is required to process at least one DMP message from the queue.
-	DmpAdvancementRule,
-	/// Too many messages upward messages submitted.
-	UmpMessagesPerCandidateOverflow {
-		/// The amount of messages a single candidate can submit.
-		messages_allowed: usize,
-		/// The amount of messages sent to all HRMP channels.
-		messages_submitted: usize,
-	},
-	/// Too many messages submitted to all HRMP channels.
-	HrmpMessagesPerCandidateOverflow {
-		/// The amount of messages a single candidate can submit.
-		messages_allowed: usize,
-		/// The amount of messages sent to all HRMP channels.
-		messages_submitted: usize,
-	},
-	/// Code upgrade not allowed.
-	CodeUpgradeRestricted,
-	/// HRMP messages are not ascending or are duplicate.
-	///
-	/// The `usize` is the index into the outbound HRMP messages of
-	/// the candidate.
-	HrmpMessagesDescendingOrDuplicate(usize),
-}
-
-/// A parachain fragment, representing another prospective parachain block.
-///
-/// This is a type which guarantees that the candidate is valid under the
-/// operating constraints.
-#[derive(Debug, Clone, PartialEq)]
-pub struct Fragment<'a> {
-	/// The new relay-parent.
-	relay_parent: RelayChainBlockInfo,
-	/// The constraints this fragment is operating under.
-	operating_constraints: Constraints,
-	/// The core information about the prospective candidate.
-	candidate: ProspectiveCandidate<'a>,
-	/// Modifications to the constraints based on the outputs of
-	/// the candidate.
-	modifications: ConstraintModifications,
-}
-
-impl<'a> Fragment<'a> {
-	/// Create a new fragment.
-	///
-	/// This fails if the fragment isn't in line with the operating
-	/// constraints. That is, either its inputs or its outputs fail
-	/// checks against the constraints.
-	///
-	/// This doesn't check that the collator signature is valid or
-	/// whether the PoV is small enough.
-	pub fn new(
-		relay_parent: RelayChainBlockInfo,
-		operating_constraints: Constraints,
-		candidate: ProspectiveCandidate<'a>,
-	) -> Result<Self, FragmentValidityError> {
-		let modifications = {
-			let commitments = &candidate.commitments;
-			ConstraintModifications {
-				required_parent: Some(commitments.head_data.clone()),
-				hrmp_watermark: Some({
-					if commitments.hrmp_watermark == relay_parent.number {
-						HrmpWatermarkUpdate::Head(commitments.hrmp_watermark)
-					} else {
-						HrmpWatermarkUpdate::Trunk(commitments.hrmp_watermark)
-					}
-				}),
-				outbound_hrmp: {
-					let mut outbound_hrmp = HashMap::<_, OutboundHrmpChannelModification>::new();
-
-					let mut last_recipient = None::<ParaId>;
-					for (i, message) in commitments.horizontal_messages.iter().enumerate() {
-						if let Some(last) = last_recipient {
-							if last >= message.recipient {
-								return Err(
-									FragmentValidityError::HrmpMessagesDescendingOrDuplicate(i),
-								)
-							}
-						}
-
-						last_recipient = Some(message.recipient);
-						let record = outbound_hrmp.entry(message.recipient).or_default();
-
-						record.bytes_submitted += message.data.len();
-						record.messages_submitted += 1;
-					}
-
-					outbound_hrmp
-				},
-				ump_messages_sent: commitments.upward_messages.len(),
-				ump_bytes_sent: commitments.upward_messages.iter().map(|msg| msg.len()).sum(),
-				dmp_messages_processed: commitments.processed_downward_messages as _,
-				code_upgrade_applied: operating_constraints
-					.future_validation_code
-					.map_or(false, |(at, _)| relay_parent.number >= at),
-			}
-		};
-
-		validate_against_constraints(
-			&operating_constraints,
-			&relay_parent,
-			&candidate,
-			&modifications,
-		)?;
-
-		Ok(Fragment { relay_parent, operating_constraints, candidate, modifications })
-	}
-
-	/// Access the relay parent information.
-	pub fn relay_parent(&self) -> &RelayChainBlockInfo {
-		&self.relay_parent
-	}
-
-	/// Access the operating constraints
-	pub fn operating_constraints(&self) -> &Constraints {
-		&self.operating_constraints
-	}
-
-	/// Access the underlying prospective candidate.
-	pub fn candidate(&self) -> &ProspectiveCandidate<'a> {
-		&self.candidate
-	}
-
-	/// Modifications to constraints based on the outputs of the candidate.
-	pub fn constraint_modifications(&self) -> &ConstraintModifications {
-		&self.modifications
-	}
-
-	/// Convert the fragment into an owned variant.
-	pub fn into_owned(self) -> Fragment<'static> {
-		Fragment { candidate: self.candidate.into_owned(), ..self }
-	}
-
-	/// Validate this fragment against some set of constraints
-	/// instead of the operating constraints.
-	pub fn validate_against_constraints(
-		&self,
-		constraints: &Constraints,
-	) -> Result<(), FragmentValidityError> {
-		validate_against_constraints(
-			constraints,
-			&self.relay_parent,
-			&self.candidate,
-			&self.modifications,
-		)
-	}
-}
-
-fn validate_against_constraints(
-	constraints: &Constraints,
-	relay_parent: &RelayChainBlockInfo,
-	candidate: &ProspectiveCandidate,
-	modifications: &ConstraintModifications,
-) -> Result<(), FragmentValidityError> {
-	let expected_pvd = PersistedValidationData {
-		parent_head: constraints.required_parent.clone(),
-		relay_parent_number: relay_parent.number,
-		relay_parent_storage_root: relay_parent.storage_root,
-		max_pov_size: constraints.max_pov_size as u32,
-	};
-
-	if expected_pvd != candidate.persisted_validation_data {
-		return Err(FragmentValidityError::PersistedValidationDataMismatch(
-			expected_pvd,
-			candidate.persisted_validation_data.clone(),
-		))
-	}
-
-	if constraints.validation_code_hash != candidate.validation_code_hash {
-		return Err(FragmentValidityError::ValidationCodeMismatch(
-			constraints.validation_code_hash,
-			candidate.validation_code_hash,
-		))
-	}
-
-	if relay_parent.number < constraints.min_relay_parent_number {
-		return Err(FragmentValidityError::RelayParentTooOld(
-			constraints.min_relay_parent_number,
-			relay_parent.number,
-		))
-	}
-
-	if candidate.commitments.new_validation_code.is_some() {
-		match constraints.upgrade_restriction {
-			None => {},
-			Some(UpgradeRestriction::Present) =>
-				return Err(FragmentValidityError::CodeUpgradeRestricted),
-		}
-	}
-
-	let announced_code_size = candidate
-		.commitments
-		.new_validation_code
-		.as_ref()
-		.map_or(0, |code| code.0.len());
-
-	if announced_code_size > constraints.max_code_size {
-		return Err(FragmentValidityError::CodeSizeTooLarge(
-			constraints.max_code_size,
-			announced_code_size,
-		))
-	}
-
-	if modifications.dmp_messages_processed == 0 {
-		if constraints
-			.dmp_remaining_messages
-			.get(0)
-			.map_or(false, |&msg_sent_at| msg_sent_at <= relay_parent.number)
-		{
-			return Err(FragmentValidityError::DmpAdvancementRule)
-		}
-	}
-
-	if candidate.commitments.horizontal_messages.len() > constraints.max_hrmp_num_per_candidate {
-		return Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow {
-			messages_allowed: constraints.max_hrmp_num_per_candidate,
-			messages_submitted: candidate.commitments.horizontal_messages.len(),
-		})
-	}
-
-	if candidate.commitments.upward_messages.len() > constraints.max_ump_num_per_candidate {
-		return Err(FragmentValidityError::UmpMessagesPerCandidateOverflow {
-			messages_allowed: constraints.max_ump_num_per_candidate,
-			messages_submitted: candidate.commitments.upward_messages.len(),
-		})
-	}
-
-	constraints
-		.check_modifications(&modifications)
-		.map_err(FragmentValidityError::OutputsInvalid)
-}
-
-#[cfg(test)]
-mod tests {
-	use super::*;
-	use polkadot_primitives::vstaging::{
-		CollatorPair, HorizontalMessages, OutboundHrmpMessage, ValidationCode,
-	};
-	use sp_application_crypto::Pair;
-
-	#[test]
-	fn stack_modifications() {
-		let para_a = ParaId::from(1u32);
-		let para_b = ParaId::from(2u32);
-		let para_c = ParaId::from(3u32);
-
-		let a = ConstraintModifications {
-			required_parent: None,
-			hrmp_watermark: None,
-			outbound_hrmp: {
-				let mut map = HashMap::new();
-				map.insert(
-					para_a,
-					OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 },
-				);
-
-				map.insert(
-					para_b,
-					OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 },
-				);
-
-				map
-			},
-			ump_messages_sent: 6,
-			ump_bytes_sent: 1000,
-			dmp_messages_processed: 5,
-			code_upgrade_applied: true,
-		};
-
-		let b = ConstraintModifications {
-			required_parent: None,
-			hrmp_watermark: None,
-			outbound_hrmp: {
-				let mut map = HashMap::new();
-				map.insert(
-					para_b,
-					OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 },
-				);
-
-				map.insert(
-					para_c,
-					OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 },
-				);
-
-				map
-			},
-			ump_messages_sent: 6,
-			ump_bytes_sent: 1000,
-			dmp_messages_processed: 5,
-			code_upgrade_applied: true,
-		};
-
-		let mut c = a.clone();
-		c.stack(&b);
-
-		assert_eq!(
-			c,
-			ConstraintModifications {
-				required_parent: None,
-				hrmp_watermark: None,
-				outbound_hrmp: {
-					let mut map = HashMap::new();
-					map.insert(
-						para_a,
-						OutboundHrmpChannelModification {
-							bytes_submitted: 100,
-							messages_submitted: 5,
-						},
-					);
-
-					map.insert(
-						para_b,
-						OutboundHrmpChannelModification {
-							bytes_submitted: 200,
-							messages_submitted: 10,
-						},
-					);
-
-					map.insert(
-						para_c,
-						OutboundHrmpChannelModification {
-							bytes_submitted: 100,
-							messages_submitted: 5,
-						},
-					);
-
-					map
-				},
-				ump_messages_sent: 12,
-				ump_bytes_sent: 2000,
-				dmp_messages_processed: 10,
-				code_upgrade_applied: true,
-			},
-		);
-
-		let mut d = ConstraintModifications::identity();
-		d.stack(&a);
-		d.stack(&b);
-
-		assert_eq!(c, d);
-	}
-
-	fn make_constraints() -> Constraints {
-		let para_a = ParaId::from(1u32);
-		let para_b = ParaId::from(2u32);
-		let para_c = ParaId::from(3u32);
-
-		Constraints {
-			min_relay_parent_number: 5,
-			max_pov_size: 1000,
-			max_code_size: 1000,
-			ump_remaining: 10,
-			ump_remaining_bytes: 1024,
-			max_ump_num_per_candidate: 5,
-			dmp_remaining_messages: Vec::new(),
-			hrmp_inbound: InboundHrmpLimitations { valid_watermarks: vec![6, 8] },
-			hrmp_channels_out: {
-				let mut map = HashMap::new();
-
-				map.insert(
-					para_a,
-					OutboundHrmpChannelLimitations { messages_remaining: 5, bytes_remaining: 512 },
-				);
-
-				map.insert(
-					para_b,
-					OutboundHrmpChannelLimitations {
-						messages_remaining: 10,
-						bytes_remaining: 1024,
-					},
-				);
-
-				map.insert(
-					para_c,
-					OutboundHrmpChannelLimitations { messages_remaining: 1, bytes_remaining: 128 },
-				);
-
-				map
-			},
-			max_hrmp_num_per_candidate: 5,
-			required_parent: HeadData::from(vec![1, 2, 3]),
-			validation_code_hash: ValidationCode(vec![4, 5, 6]).hash(),
-			upgrade_restriction: None,
-			future_validation_code: None,
-		}
-	}
-
-	#[test]
-	fn constraints_disallowed_trunk_watermark() {
-		let constraints = make_constraints();
-		let mut modifications = ConstraintModifications::identity();
-		modifications.hrmp_watermark = Some(HrmpWatermarkUpdate::Trunk(7));
-
-		assert_eq!(
-			constraints.check_modifications(&modifications),
-			Err(ModificationError::DisallowedHrmpWatermark(7)),
-		);
-
-		assert_eq!(
-			constraints.apply_modifications(&modifications),
-			Err(ModificationError::DisallowedHrmpWatermark(7)),
-		);
-	}
-
-	#[test]
-	fn constraints_always_allow_head_watermark() {
-		let constraints = make_constraints();
-		let mut modifications = ConstraintModifications::identity();
-		modifications.hrmp_watermark = Some(HrmpWatermarkUpdate::Head(7));
-
-		assert!(constraints.check_modifications(&modifications).is_ok());
-
-		let new_constraints = constraints.apply_modifications(&modifications).unwrap();
-		assert_eq!(new_constraints.hrmp_inbound.valid_watermarks, vec![8]);
-	}
-
-	#[test]
-	fn constraints_no_such_hrmp_channel() {
-		let constraints = make_constraints();
-		let mut modifications = ConstraintModifications::identity();
-		let bad_para = ParaId::from(100u32);
-		modifications.outbound_hrmp.insert(
-			bad_para,
-			OutboundHrmpChannelModification { bytes_submitted: 0, messages_submitted: 0 },
-		);
-
-		assert_eq!(
-			constraints.check_modifications(&modifications),
-			Err(ModificationError::NoSuchHrmpChannel(bad_para)),
-		);
-
-		assert_eq!(
-			constraints.apply_modifications(&modifications),
-			Err(ModificationError::NoSuchHrmpChannel(bad_para)),
-		);
-	}
-
-	#[test]
-	fn constraints_hrmp_messages_overflow() {
-		let constraints = make_constraints();
-		let mut modifications = ConstraintModifications::identity();
-		let para_a = ParaId::from(1u32);
-		modifications.outbound_hrmp.insert(
-			para_a,
-			OutboundHrmpChannelModification { bytes_submitted: 0, messages_submitted: 6 },
-		);
-
-		assert_eq!(
-			constraints.check_modifications(&modifications),
-			Err(ModificationError::HrmpMessagesOverflow {
-				para_id: para_a,
-				messages_remaining: 5,
-				messages_submitted: 6,
-			}),
-		);
-
-		assert_eq!(
-			constraints.apply_modifications(&modifications),
-			Err(ModificationError::HrmpMessagesOverflow {
-				para_id: para_a,
-				messages_remaining: 5,
-				messages_submitted: 6,
-			}),
-		);
-	}
-
-	#[test]
-	fn constraints_hrmp_bytes_overflow() {
-		let constraints = make_constraints();
-		let mut modifications = ConstraintModifications::identity();
-		let para_a = ParaId::from(1u32);
-		modifications.outbound_hrmp.insert(
-			para_a,
-			OutboundHrmpChannelModification { bytes_submitted: 513, messages_submitted: 1 },
-		);
-
-		assert_eq!(
-			constraints.check_modifications(&modifications),
-			Err(ModificationError::HrmpBytesOverflow {
-				para_id: para_a,
-				bytes_remaining: 512,
-				bytes_submitted: 513,
-			}),
-		);
-
-		assert_eq!(
-			constraints.apply_modifications(&modifications),
-			Err(ModificationError::HrmpBytesOverflow {
-				para_id: para_a,
-				bytes_remaining: 512,
-				bytes_submitted: 513,
-			}),
-		);
-	}
-
-	#[test]
-	fn constraints_ump_messages_overflow() {
-		let constraints = make_constraints();
-		let mut modifications = ConstraintModifications::identity();
-		modifications.ump_messages_sent = 11;
-
-		assert_eq!(
-			constraints.check_modifications(&modifications),
-			Err(ModificationError::UmpMessagesOverflow {
-				messages_remaining: 10,
-				messages_submitted: 11,
-			}),
-		);
-
-		assert_eq!(
-			constraints.apply_modifications(&modifications),
-			Err(ModificationError::UmpMessagesOverflow {
-				messages_remaining: 10,
-				messages_submitted: 11,
-			}),
-		);
-	}
-
-	#[test]
-	fn constraints_ump_bytes_overflow() {
-		let constraints = make_constraints();
-		let mut modifications = ConstraintModifications::identity();
-		modifications.ump_bytes_sent = 1025;
-
-		assert_eq!(
-			constraints.check_modifications(&modifications),
-			Err(ModificationError::UmpBytesOverflow {
-				bytes_remaining: 1024,
-				bytes_submitted: 1025,
-			}),
-		);
-
-		assert_eq!(
-			constraints.apply_modifications(&modifications),
-			Err(ModificationError::UmpBytesOverflow {
-				bytes_remaining: 1024,
-				bytes_submitted: 1025,
-			}),
-		);
-	}
-
-	#[test]
-	fn constraints_dmp_messages() {
-		let mut constraints = make_constraints();
-		let mut modifications = ConstraintModifications::identity();
-		assert!(constraints.check_modifications(&modifications).is_ok());
-		assert!(constraints.apply_modifications(&modifications).is_ok());
-
-		modifications.dmp_messages_processed = 6;
-
-		assert_eq!(
-			constraints.check_modifications(&modifications),
-			Err(ModificationError::DmpMessagesUnderflow {
-				messages_remaining: 0,
-				messages_processed: 6,
-			}),
-		);
-
-		assert_eq!(
-			constraints.apply_modifications(&modifications),
-			Err(ModificationError::DmpMessagesUnderflow {
-				messages_remaining: 0,
-				messages_processed: 6,
-			}),
-		);
-
-		constraints.dmp_remaining_messages = vec![1, 4, 8, 10];
-		modifications.dmp_messages_processed = 2;
-		assert!(constraints.check_modifications(&modifications).is_ok());
-		let constraints = constraints
-			.apply_modifications(&modifications)
-			.expect("modifications are valid");
-
-		assert_eq!(&constraints.dmp_remaining_messages, &[8, 10]);
-	}
-
-	#[test]
-	fn constraints_nonexistent_code_upgrade() {
-		let constraints = make_constraints();
-		let mut modifications = ConstraintModifications::identity();
-		modifications.code_upgrade_applied = true;
-
-		assert_eq!(
-			constraints.check_modifications(&modifications),
-			Err(ModificationError::AppliedNonexistentCodeUpgrade),
-		);
-
-		assert_eq!(
-			constraints.apply_modifications(&modifications),
-			Err(ModificationError::AppliedNonexistentCodeUpgrade),
-		);
-	}
-
-	fn make_candidate(
-		constraints: &Constraints,
-		relay_parent: &RelayChainBlockInfo,
-	) -> ProspectiveCandidate<'static> {
-		let collator_pair = CollatorPair::generate().0;
-		let collator = collator_pair.public();
-
-		let sig = collator_pair.sign(b"blabla".as_slice());
-
-		ProspectiveCandidate {
-			commitments: Cow::Owned(CandidateCommitments {
-				upward_messages: Default::default(),
-				horizontal_messages: Default::default(),
-				new_validation_code: None,
-				head_data: HeadData::from(vec![1, 2, 3, 4, 5]),
-				processed_downward_messages: 0,
-				hrmp_watermark: relay_parent.number,
-			}),
-			collator,
-			collator_signature: sig,
-			persisted_validation_data: PersistedValidationData {
-				parent_head: constraints.required_parent.clone(),
-				relay_parent_number: relay_parent.number,
-				relay_parent_storage_root: relay_parent.storage_root,
-				max_pov_size: constraints.max_pov_size as u32,
-			},
-			pov_hash: Hash::repeat_byte(1),
-			validation_code_hash: constraints.validation_code_hash,
-		}
-	}
-
-	#[test]
-	fn fragment_validation_code_mismatch() {
-		let relay_parent = RelayChainBlockInfo {
-			number: 6,
-			hash: Hash::repeat_byte(0x0a),
-			storage_root: Hash::repeat_byte(0xff),
-		};
-
-		let constraints = make_constraints();
-		let mut candidate = make_candidate(&constraints, &relay_parent);
-
-		let expected_code = constraints.validation_code_hash;
-		let got_code = ValidationCode(vec![9, 9, 9]).hash();
-
-		candidate.validation_code_hash = got_code;
-
-		assert_eq!(
-			Fragment::new(relay_parent, constraints, candidate),
-			Err(FragmentValidityError::ValidationCodeMismatch(expected_code, got_code,)),
-		)
-	}
-
-	#[test]
-	fn fragment_pvd_mismatch() {
-		let relay_parent = RelayChainBlockInfo {
-			number: 6,
-			hash: Hash::repeat_byte(0x0a),
-			storage_root: Hash::repeat_byte(0xff),
-		};
-
-		let relay_parent_b = RelayChainBlockInfo {
-			number: 6,
-			hash: Hash::repeat_byte(0x0b),
-			storage_root: Hash::repeat_byte(0xee),
-		};
-
-		let constraints = make_constraints();
-		let candidate = make_candidate(&constraints, &relay_parent);
-
-		let expected_pvd = PersistedValidationData {
-			parent_head: constraints.required_parent.clone(),
-			relay_parent_number: relay_parent_b.number,
-			relay_parent_storage_root: relay_parent_b.storage_root,
-			max_pov_size: constraints.max_pov_size as u32,
-		};
-
-		let got_pvd = candidate.persisted_validation_data.clone();
-
-		assert_eq!(
-			Fragment::new(relay_parent_b, constraints, candidate),
-			Err(FragmentValidityError::PersistedValidationDataMismatch(expected_pvd, got_pvd,)),
-		);
-	}
-
-	#[test]
-	fn fragment_code_size_too_large() {
-		let relay_parent = RelayChainBlockInfo {
-			number: 6,
-			hash: Hash::repeat_byte(0x0a),
-			storage_root: Hash::repeat_byte(0xff),
-		};
-
-		let constraints = make_constraints();
-		let mut candidate = make_candidate(&constraints, &relay_parent);
-
-		let max_code_size = constraints.max_code_size;
-		candidate.commitments_mut().new_validation_code = Some(vec![0; max_code_size + 1].into());
-
-		assert_eq!(
-			Fragment::new(relay_parent, constraints, candidate),
-			Err(FragmentValidityError::CodeSizeTooLarge(max_code_size, max_code_size + 1,)),
-		);
-	}
-
-	#[test]
-	fn fragment_relay_parent_too_old() {
-		let relay_parent = RelayChainBlockInfo {
-			number: 3,
-			hash: Hash::repeat_byte(0x0a),
-			storage_root: Hash::repeat_byte(0xff),
-		};
-
-		let constraints = make_constraints();
-		let candidate = make_candidate(&constraints, &relay_parent);
-
-		assert_eq!(
-			Fragment::new(relay_parent, constraints, candidate),
-			Err(FragmentValidityError::RelayParentTooOld(5, 3,)),
-		);
-	}
-
-	#[test]
-	fn fragment_hrmp_messages_overflow() {
-		let relay_parent = RelayChainBlockInfo {
-			number: 6,
-			hash: Hash::repeat_byte(0x0a),
-			storage_root: Hash::repeat_byte(0xff),
-		};
-
-		let constraints = make_constraints();
-		let mut candidate = make_candidate(&constraints, &relay_parent);
-
-		let max_hrmp = constraints.max_hrmp_num_per_candidate;
-
-		candidate
-			.commitments_mut()
-			.horizontal_messages
-			.try_extend((0..max_hrmp + 1).map(|i| OutboundHrmpMessage {
-				recipient: ParaId::from(i as u32),
-				data: vec![1, 2, 3],
-			}))
-			.unwrap();
-
-		assert_eq!(
-			Fragment::new(relay_parent, constraints, candidate),
-			Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow {
-				messages_allowed: max_hrmp,
-				messages_submitted: max_hrmp + 1,
-			}),
-		);
-	}
-
-	#[test]
-	fn fragment_dmp_advancement_rule() {
-		let relay_parent = RelayChainBlockInfo {
-			number: 6,
-			hash: Hash::repeat_byte(0x0a),
-			storage_root: Hash::repeat_byte(0xff),
-		};
-
-		let mut constraints = make_constraints();
-		let mut candidate = make_candidate(&constraints, &relay_parent);
-
-		// Empty dmp queue is ok.
-		assert!(Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()).is_ok());
-		// Unprocessed message that was sent later is ok.
-		constraints.dmp_remaining_messages = vec![relay_parent.number + 1];
-		assert!(Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()).is_ok());
-
-		for block_number in 0..=relay_parent.number {
-			constraints.dmp_remaining_messages = vec![block_number];
-
-			assert_eq!(
-				Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()),
-				Err(FragmentValidityError::DmpAdvancementRule),
-			);
-		}
-
-		candidate.commitments.to_mut().processed_downward_messages = 1;
-		assert!(Fragment::new(relay_parent, constraints, candidate).is_ok());
-	}
-
-	#[test]
-	fn fragment_ump_messages_overflow() {
-		let relay_parent = RelayChainBlockInfo {
-			number: 6,
-			hash: Hash::repeat_byte(0x0a),
-			storage_root: Hash::repeat_byte(0xff),
-		};
-
-		let constraints = make_constraints();
-		let mut candidate = make_candidate(&constraints, &relay_parent);
-
-		let max_ump = constraints.max_ump_num_per_candidate;
-
-		candidate
-			.commitments
-			.to_mut()
-			.upward_messages
-			.try_extend((0..max_ump + 1).map(|i| vec![i as u8]))
-			.unwrap();
-
-		assert_eq!(
-			Fragment::new(relay_parent, constraints, candidate),
-			Err(FragmentValidityError::UmpMessagesPerCandidateOverflow {
-				messages_allowed: max_ump,
-				messages_submitted: max_ump + 1,
-			}),
-		);
-	}
-
-	#[test]
-	fn fragment_code_upgrade_restricted() {
-		let relay_parent = RelayChainBlockInfo {
-			number: 6,
-			hash: Hash::repeat_byte(0x0a),
-			storage_root: Hash::repeat_byte(0xff),
-		};
-
-		let mut constraints = make_constraints();
-		let mut candidate = make_candidate(&constraints, &relay_parent);
-
-		constraints.upgrade_restriction = Some(UpgradeRestriction::Present);
-		candidate.commitments_mut().new_validation_code = Some(ValidationCode(vec![1, 2, 3]));
-
-		assert_eq!(
-			Fragment::new(relay_parent, constraints, candidate),
-			Err(FragmentValidityError::CodeUpgradeRestricted),
-		);
-	}
-
-	#[test]
-	fn fragment_hrmp_messages_descending_or_duplicate() {
-		let relay_parent = RelayChainBlockInfo {
-			number: 6,
-			hash: Hash::repeat_byte(0x0a),
-			storage_root: Hash::repeat_byte(0xff),
-		};
-
-		let constraints = make_constraints();
-		let mut candidate = make_candidate(&constraints, &relay_parent);
-
-		candidate.commitments_mut().horizontal_messages = HorizontalMessages::truncate_from(vec![
-			OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![1, 2, 3] },
-			OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] },
-		]);
-
-		assert_eq!(
-			Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()),
-			Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)),
-		);
-
-		candidate.commitments_mut().horizontal_messages = HorizontalMessages::truncate_from(vec![
-			OutboundHrmpMessage { recipient: ParaId::from(1 as u32), data: vec![1, 2, 3] },
-			OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] },
-		]);
-
-		assert_eq!(
-			Fragment::new(relay_parent, constraints, candidate),
-			Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)),
-		);
-	}
-}
diff --git a/polkadot/node/subsystem-util/src/lib.rs b/polkadot/node/subsystem-util/src/lib.rs
index daee4a8350e..e60a9ff82ee 100644
--- a/polkadot/node/subsystem-util/src/lib.rs
+++ b/polkadot/node/subsystem-util/src/lib.rs
@@ -43,7 +43,7 @@ use futures::channel::{mpsc, oneshot};
 use parity_scale_codec::Encode;
 
 use polkadot_primitives::{
-	vstaging as vstaging_primitives, AuthorityDiscoveryId, CandidateEvent, CandidateHash,
+	AsyncBackingParams, AuthorityDiscoveryId, CandidateEvent, CandidateHash,
 	CommittedCandidateReceipt, CoreState, EncodeAs, GroupIndex, GroupRotationInfo, Hash,
 	Id as ParaId, OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes,
 	SessionIndex, SessionInfo, Signed, SigningContext, ValidationCode, ValidationCodeHash,
@@ -227,7 +227,7 @@ specialize_requests! {
 	fn request_key_ownership_proof(validator_id: ValidatorId) -> Option<slashing::OpaqueKeyOwnershipProof>; KeyOwnershipProof;
 	fn request_submit_report_dispute_lost(dp: slashing::DisputeProof, okop: slashing::OpaqueKeyOwnershipProof) -> Option<()>; SubmitReportDisputeLost;
 
-	fn request_staging_async_backing_params() -> vstaging_primitives::AsyncBackingParams; StagingAsyncBackingParams;
+	fn request_async_backing_params() -> AsyncBackingParams; AsyncBackingParams;
 }
 
 /// Requests executor parameters from the runtime effective at given relay-parent. First obtains
diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs
index c078b17d217..8d7cef88a70 100644
--- a/polkadot/node/subsystem-util/src/runtime/mod.rs
+++ b/polkadot/node/subsystem-util/src/runtime/mod.rs
@@ -30,16 +30,16 @@ use polkadot_node_subsystem::{
 };
 use polkadot_node_subsystem_types::UnpinHandle;
 use polkadot_primitives::{
-	vstaging, CandidateEvent, CandidateHash, CoreState, EncodeAs, ExecutorParams, GroupIndex,
-	GroupRotationInfo, Hash, IndexedVec, OccupiedCore, ScrapedOnChainVotes, SessionIndex,
-	SessionInfo, Signed, SigningContext, UncheckedSigned, ValidationCode, ValidationCodeHash,
-	ValidatorId, ValidatorIndex, LEGACY_MIN_BACKING_VOTES,
+	slashing, AsyncBackingParams, CandidateEvent, CandidateHash, CoreState, EncodeAs,
+	ExecutorParams, GroupIndex, GroupRotationInfo, Hash, IndexedVec, OccupiedCore,
+	ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, SigningContext, UncheckedSigned,
+	ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, LEGACY_MIN_BACKING_VOTES,
 };
 
 use crate::{
-	request_availability_cores, request_candidate_events, request_from_runtime,
-	request_key_ownership_proof, request_on_chain_votes, request_session_executor_params,
-	request_session_index_for_child, request_session_info, request_staging_async_backing_params,
+	request_async_backing_params, request_availability_cores, request_candidate_events,
+	request_from_runtime, request_key_ownership_proof, request_on_chain_votes,
+	request_session_executor_params, request_session_index_for_child, request_session_info,
 	request_submit_report_dispute_lost, request_unapplied_slashes, request_validation_code_by_hash,
 	request_validator_groups,
 };
@@ -377,7 +377,7 @@ where
 pub async fn get_unapplied_slashes<Sender>(
 	sender: &mut Sender,
 	relay_parent: Hash,
-) -> Result<Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>>
+) -> Result<Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>>
 where
 	Sender: SubsystemSender<RuntimeApiMessage>,
 {
@@ -392,7 +392,7 @@ pub async fn key_ownership_proof<Sender>(
 	sender: &mut Sender,
 	relay_parent: Hash,
 	validator_id: ValidatorId,
-) -> Result<Option<vstaging::slashing::OpaqueKeyOwnershipProof>>
+) -> Result<Option<slashing::OpaqueKeyOwnershipProof>>
 where
 	Sender: SubsystemSender<RuntimeApiMessage>,
 {
@@ -403,8 +403,8 @@ where
 pub async fn submit_report_dispute_lost<Sender>(
 	sender: &mut Sender,
 	relay_parent: Hash,
-	dispute_proof: vstaging::slashing::DisputeProof,
-	key_ownership_proof: vstaging::slashing::OpaqueKeyOwnershipProof,
+	dispute_proof: slashing::DisputeProof,
+	key_ownership_proof: slashing::OpaqueKeyOwnershipProof,
 ) -> Result<Option<()>>
 where
 	Sender: SubsystemSender<RuntimeApiMessage>,
@@ -429,7 +429,7 @@ where
 pub enum ProspectiveParachainsMode {
 	/// Runtime API without support of `async_backing_params`: no prospective parachains.
 	Disabled,
-	/// vstaging runtime API: prospective parachains.
+	/// v7 runtime API: prospective parachains.
 	Enabled {
 		/// The maximum number of para blocks between the para head in a relay parent
 		/// and a new candidate. Restricts nodes from building arbitrary long chains
@@ -457,8 +457,7 @@ pub async fn prospective_parachains_mode<Sender>(
 where
 	Sender: SubsystemSender<RuntimeApiMessage>,
 {
-	let result =
-		recv_runtime(request_staging_async_backing_params(relay_parent, sender).await).await;
+	let result = recv_runtime(request_async_backing_params(relay_parent, sender).await).await;
 
 	if let Err(error::Error::RuntimeRequest(RuntimeApiError::NotSupported { runtime_api_name })) =
 		&result
@@ -472,7 +471,7 @@ where
 
 		Ok(ProspectiveParachainsMode::Disabled)
 	} else {
-		let vstaging::AsyncBackingParams { max_candidate_depth, allowed_ancestry_len } = result?;
+		let AsyncBackingParams { max_candidate_depth, allowed_ancestry_len } = result?;
 		Ok(ProspectiveParachainsMode::Enabled {
 			max_candidate_depth: max_candidate_depth as _,
 			allowed_ancestry_len: allowed_ancestry_len as _,
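
The fallback above lets subsystems treat a `NotSupported` error from `request_async_backing_params` as async backing being off. A minimal sketch of how a caller is expected to branch on the detected mode; the `collation_limits` helper is hypothetical and the `usize` field types are assumed from the `as _` conversions above:

```rust
use polkadot_node_subsystem_util::runtime::ProspectiveParachainsMode;

/// Hypothetical helper: derive per-leaf limits from the detected mode.
fn collation_limits(mode: ProspectiveParachainsMode) -> (usize, usize) {
	match mode {
		// Runtime predates `async_backing_params` (pre parachain host v7):
		// no prospective parachains, build only directly on the leaf.
		ProspectiveParachainsMode::Disabled => (0, 0),
		// The runtime advertised its limits; forward them as-is.
		ProspectiveParachainsMode::Enabled { max_candidate_depth, allowed_ancestry_len } =>
			(max_candidate_depth, allowed_ancestry_len),
	}
}
```
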
diff --git a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml
index fcbba9bbe21..73b1fab529e 100644
--- a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml
+++ b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml
@@ -39,6 +39,3 @@ sc-service = { path = "../../../../../substrate/client/service" }
 sp-keyring = { path = "../../../../../substrate/primitives/keyring" }
 
 tokio = { version = "1.24.2", features = ["macros"] }
-
-[features]
-network-protocol-staging = [ "polkadot-cli/network-protocol-staging" ]
diff --git a/polkadot/primitives/src/lib.rs b/polkadot/primitives/src/lib.rs
index 9121b379085..5adb6d25313 100644
--- a/polkadot/primitives/src/lib.rs
+++ b/polkadot/primitives/src/lib.rs
@@ -19,8 +19,8 @@
 #![warn(missing_docs)]
 #![cfg_attr(not(feature = "std"), no_std)]
 
-// `v5` is currently the latest stable version of the runtime API.
-pub mod v5;
+// `v6` is currently the latest stable version of the runtime API.
+pub mod v6;
 
 // The 'staging' version is special - it contains primitives which are
 // still in development. Once they are considered stable, they will be
@@ -33,20 +33,21 @@ pub mod runtime_api;
 
 // Current primitives not requiring versioning are exported here.
 // Primitives requiring versioning must not be exported and must be referred by an exact version.
-pub use v5::{
-	byzantine_threshold, check_candidate_backing, collator_signature_payload,
+pub use v6::{
+	async_backing, byzantine_threshold, check_candidate_backing, collator_signature_payload,
 	effective_minimum_backing_votes, metric_definitions, slashing, supermajority_threshold,
 	well_known_keys, AbridgedHostConfiguration, AbridgedHrmpChannel, AccountId, AccountIndex,
-	AccountPublic, ApprovalVote, AssignmentId, AuthorityDiscoveryId, AvailabilityBitfield,
-	BackedCandidate, Balance, BlakeTwo256, Block, BlockId, BlockNumber, CandidateCommitments,
-	CandidateDescriptor, CandidateEvent, CandidateHash, CandidateIndex, CandidateReceipt,
-	CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CollatorId, CollatorSignature,
-	CommittedCandidateReceipt, CompactStatement, ConsensusLog, CoreIndex, CoreState, DisputeState,
-	DisputeStatement, DisputeStatementSet, DownwardMessage, EncodeAs, ExecutorParam,
-	ExecutorParams, ExecutorParamsHash, ExplicitDisputeStatement, GroupIndex, GroupRotationInfo,
-	Hash, HashT, HeadData, Header, HrmpChannelId, Id, InboundDownwardMessage, InboundHrmpMessage,
-	IndexedVec, InherentData, InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, Nonce,
-	OccupiedCore, OccupiedCoreAssumption, OutboundHrmpMessage, ParathreadClaim, ParathreadEntry,
+	AccountPublic, ApprovalVote, AssignmentId, AsyncBackingParams, AuthorityDiscoveryId,
+	AvailabilityBitfield, BackedCandidate, Balance, BlakeTwo256, Block, BlockId, BlockNumber,
+	CandidateCommitments, CandidateDescriptor, CandidateEvent, CandidateHash, CandidateIndex,
+	CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CollatorId,
+	CollatorSignature, CommittedCandidateReceipt, CompactStatement, ConsensusLog, CoreIndex,
+	CoreState, DisputeState, DisputeStatement, DisputeStatementSet, DownwardMessage, EncodeAs,
+	ExecutorParam, ExecutorParams, ExecutorParamsHash, ExplicitDisputeStatement, GroupIndex,
+	GroupRotationInfo, Hash, HashT, HeadData, Header, HorizontalMessages, HrmpChannelId, Id,
+	InboundDownwardMessage, InboundHrmpMessage, IndexedVec, InherentData,
+	InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, Nonce, OccupiedCore,
+	OccupiedCoreAssumption, OutboundHrmpMessage, ParathreadClaim, ParathreadEntry,
 	PersistedValidationData, PvfCheckStatement, PvfExecTimeoutKind, PvfPrepTimeoutKind,
 	RuntimeMetricLabel, RuntimeMetricLabelValue, RuntimeMetricLabelValues, RuntimeMetricLabels,
 	RuntimeMetricOp, RuntimeMetricUpdate, ScheduledCore, ScrapedOnChainVotes, SessionIndex,
@@ -61,4 +62,4 @@ pub use v5::{
 };
 
 #[cfg(feature = "std")]
-pub use v5::{AssignmentPair, CollatorPair, ValidatorPair};
+pub use v6::{AssignmentPair, CollatorPair, ValidatorPair};
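
With these re-exports in place, downstream crates take the async-backing types from the stable paths rather than `vstaging`. A small illustrative snippet (a sketch, not code from this patch):

```rust
// Stable paths after this change: common types are re-exported at the crate
// root, while backing-state types live under the `async_backing` module.
use polkadot_primitives::{async_backing::BackingState, AsyncBackingParams};

/// `max_candidate_depth` is the documented limit on unincluded para blocks;
/// it is 0 whenever async backing is disabled.
fn depth_limit(params: &AsyncBackingParams) -> u32 {
	params.max_candidate_depth
}

/// Number of candidates currently pending availability for a para.
fn pending(state: &BackingState) -> usize {
	state.pending_availability.len()
}
```
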
diff --git a/polkadot/primitives/src/runtime_api.rs b/polkadot/primitives/src/runtime_api.rs
index e5f1aa4276e..6cb66d40204 100644
--- a/polkadot/primitives/src/runtime_api.rs
+++ b/polkadot/primitives/src/runtime_api.rs
@@ -114,10 +114,11 @@
 //! separated from the stable primitives.
 
 use crate::{
-	vstaging, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash,
-	CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo,
-	OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes,
-	SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, ValidatorSignature,
+	async_backing, slashing, AsyncBackingParams, BlockNumber, CandidateCommitments, CandidateEvent,
+	CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams,
+	GroupRotationInfo, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement,
+	ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex,
+	ValidatorSignature,
 };
 use parity_scale_codec::{Decode, Encode};
 use polkadot_core_primitives as pcp;
@@ -224,38 +225,37 @@ sp_api::decl_runtime_apis! {
 
 		/// Returns a list of validators that lost a past session dispute and need to be slashed.
 		/// NOTE: This function is only available since parachain host version 5.
-		fn unapplied_slashes() -> Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>;
+		fn unapplied_slashes() -> Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>;
 
 		/// Returns a merkle proof of a validator session key.
 		/// NOTE: This function is only available since parachain host version 5.
 		fn key_ownership_proof(
 			validator_id: ValidatorId,
-		) -> Option<vstaging::slashing::OpaqueKeyOwnershipProof>;
+		) -> Option<slashing::OpaqueKeyOwnershipProof>;
 
 		/// Submit an unsigned extrinsic to slash validators who lost a dispute about
 		/// a candidate of a past session.
 		/// NOTE: This function is only available since parachain host version 5.
 		fn submit_report_dispute_lost(
-			dispute_proof: vstaging::slashing::DisputeProof,
-			key_ownership_proof: vstaging::slashing::OpaqueKeyOwnershipProof,
+			dispute_proof: slashing::DisputeProof,
+			key_ownership_proof: slashing::OpaqueKeyOwnershipProof,
 		) -> Option<()>;
 
-		/***** Staging *****/
+		/***** Added in v6 *****/
 
 		/// Get the minimum number of backing votes for a parachain candidate.
 		/// This is a staging method! Do not use on production runtimes!
 		#[api_version(6)]
 		fn minimum_backing_votes() -> u32;
 
-		/***** Asynchronous backing *****/
+		/***** Added in v7: Asynchronous backing *****/
 
 		/// Returns the state of parachain backing for a given para.
-		/// This is a staging method! Do not use on production runtimes!
-		#[api_version(99)]
-		fn staging_para_backing_state(_: ppp::Id) -> Option<vstaging::BackingState<H, N>>;
+		#[api_version(7)]
+		fn para_backing_state(_: ppp::Id) -> Option<async_backing::BackingState<H, N>>;
 
 		/// Returns candidate's acceptance limitations for asynchronous backing for a relay parent.
-		#[api_version(99)]
-		fn staging_async_backing_params() -> vstaging::AsyncBackingParams;
+		#[api_version(7)]
+		fn async_backing_params() -> AsyncBackingParams;
 	}
 }
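
The `#[api_version(7)]` attributes are what gate the two new calls: nodes should only issue them against runtimes advertising parachain host version 7 or newer. How the advertised version is fetched is node-side plumbing outside this hunk, so the check below is only a sketch with `reported_version` as a stand-in:

```rust
/// `para_backing_state` and `async_backing_params` are only callable once the
/// runtime reports parachain host API version 7 or newer.
const ASYNC_BACKING_API_VERSION: u32 = 7;

fn supports_async_backing(reported_version: u32) -> bool {
	reported_version >= ASYNC_BACKING_API_VERSION
}

#[cfg(test)]
mod tests {
	use super::*;

	#[test]
	fn version_gate() {
		assert!(!supports_async_backing(6));
		assert!(supports_async_backing(7));
	}
}
```
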
diff --git a/polkadot/primitives/src/v6/async_backing.rs b/polkadot/primitives/src/v6/async_backing.rs
new file mode 100644
index 00000000000..1abe87b6dec
--- /dev/null
+++ b/polkadot/primitives/src/v6/async_backing.rs
@@ -0,0 +1,132 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Asynchronous backing primitives.
+
+use super::*;
+
+use parity_scale_codec::{Decode, Encode};
+use primitives::RuntimeDebug;
+use scale_info::TypeInfo;
+
+/// Candidate's acceptance limitations for asynchronous backing per relay parent.
+#[derive(
+	RuntimeDebug,
+	Copy,
+	Clone,
+	PartialEq,
+	Encode,
+	Decode,
+	TypeInfo,
+	serde::Serialize,
+	serde::Deserialize,
+)]
+
+pub struct AsyncBackingParams {
+	/// The maximum number of para blocks between the para head in a relay parent
+	/// and a new candidate. Restricts nodes from building arbitrary long chains
+	/// and spamming other validators.
+	///
+	/// When async backing is disabled, the only valid value is 0.
+	pub max_candidate_depth: u32,
+	/// How many ancestors of a relay parent are allowed to be built upon by
+	/// new candidates.
+	///
+	/// When async backing is disabled, the only valid value is 0.
+	pub allowed_ancestry_len: u32,
+}
+
+/// Constraints on inbound HRMP channels.
+#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)]
+pub struct InboundHrmpLimitations<N = BlockNumber> {
+	/// An exhaustive set of all valid watermarks, sorted ascending.
+	///
+	/// It's only expected to contain block numbers at which messages were
+	/// previously sent to a para, excluding the most recent head.
+	pub valid_watermarks: Vec<N>,
+}
+
+/// Constraints on outbound HRMP channels.
+#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)]
+pub struct OutboundHrmpChannelLimitations {
+	/// The maximum bytes that can be written to the channel.
+	pub bytes_remaining: u32,
+	/// The maximum messages that can be written to the channel.
+	pub messages_remaining: u32,
+}
+
+/// Constraints on the actions that can be taken by a new parachain
+/// block. These limitations are implicitly associated with some particular
+/// parachain, which should be apparent from usage.
+#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)]
+pub struct Constraints<N = BlockNumber> {
+	/// The minimum relay-parent number accepted under these constraints.
+	pub min_relay_parent_number: N,
+	/// The maximum Proof-of-Validity size allowed, in bytes.
+	pub max_pov_size: u32,
+	/// The maximum new validation code size allowed, in bytes.
+	pub max_code_size: u32,
+	/// The number of UMP messages remaining.
+	pub ump_remaining: u32,
+	/// The number of UMP bytes remaining.
+	pub ump_remaining_bytes: u32,
+	/// The maximum number of UMP messages allowed per candidate.
+	pub max_ump_num_per_candidate: u32,
+	/// Remaining DMP queue. Only includes sent-at block numbers.
+	pub dmp_remaining_messages: Vec<N>,
+	/// The limitations of all registered inbound HRMP channels.
+	pub hrmp_inbound: InboundHrmpLimitations<N>,
+	/// The limitations of all registered outbound HRMP channels.
+	pub hrmp_channels_out: Vec<(Id, OutboundHrmpChannelLimitations)>,
+	/// The maximum number of HRMP messages allowed per candidate.
+	pub max_hrmp_num_per_candidate: u32,
+	/// The required parent head-data of the parachain.
+	pub required_parent: HeadData,
+	/// The expected validation-code-hash of this parachain.
+	pub validation_code_hash: ValidationCodeHash,
+	/// The code upgrade restriction signal as-of this parachain.
+	pub upgrade_restriction: Option<UpgradeRestriction>,
+	/// The future validation code hash, if any, and at what relay-parent
+	/// number the upgrade would be minimally applied.
+	pub future_validation_code: Option<(N, ValidationCodeHash)>,
+}
+
+/// A candidate pending availability.
+#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)]
+pub struct CandidatePendingAvailability<H = Hash, N = BlockNumber> {
+	/// The hash of the candidate.
+	pub candidate_hash: CandidateHash,
+	/// The candidate's descriptor.
+	pub descriptor: CandidateDescriptor<H>,
+	/// The commitments of the candidate.
+	pub commitments: CandidateCommitments,
+	/// The candidate's relay parent's number.
+	pub relay_parent_number: N,
+	/// The maximum Proof-of-Validity size allowed, in bytes.
+	pub max_pov_size: u32,
+}
+
+/// The per-parachain state of the backing system, including
+/// state-machine constraints and candidates pending availability.
+#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)]
+pub struct BackingState<H = Hash, N = BlockNumber> {
+	/// The state-machine constraints of the parachain.
+	pub constraints: Constraints<N>,
+	/// The candidates pending availability. These should be ordered, i.e. they should form
+	/// a sub-chain, where the first candidate builds on top of the required parent of the
+	/// constraints and each subsequent builds on top of the previous head-data.
+	pub pending_availability: Vec<CandidatePendingAvailability<H, N>>,
+}
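
To make the data model of this new module concrete, here is a minimal, self-contained construction of the types it defines. All values are arbitrary placeholders; real data comes from the runtime via `para_backing_state`:

```rust
use polkadot_primitives::async_backing::{
	AsyncBackingParams, BackingState, Constraints, InboundHrmpLimitations,
};
use polkadot_primitives::{HeadData, ValidationCode};

fn example_state() -> (AsyncBackingParams, BackingState) {
	// Both fields must be 0 while async backing is disabled; these values
	// sketch an enabled configuration.
	let params = AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 2 };

	let constraints = Constraints {
		min_relay_parent_number: 0,
		max_pov_size: 5 * 1024 * 1024,
		max_code_size: 3 * 1024 * 1024,
		ump_remaining: 10,
		ump_remaining_bytes: 1024,
		max_ump_num_per_candidate: 5,
		dmp_remaining_messages: Vec::new(),
		hrmp_inbound: InboundHrmpLimitations { valid_watermarks: Vec::new() },
		hrmp_channels_out: Vec::new(),
		max_hrmp_num_per_candidate: 5,
		required_parent: HeadData(Vec::new()),
		validation_code_hash: ValidationCode(Vec::new()).hash(),
		upgrade_restriction: None,
		future_validation_code: None,
	};

	// No candidates pending availability yet.
	(params, BackingState { constraints, pending_availability: Vec::new() })
}
```
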
diff --git a/polkadot/primitives/src/v5/executor_params.rs b/polkadot/primitives/src/v6/executor_params.rs
similarity index 100%
rename from polkadot/primitives/src/v5/executor_params.rs
rename to polkadot/primitives/src/v6/executor_params.rs
diff --git a/polkadot/primitives/src/v5/metrics.rs b/polkadot/primitives/src/v6/metrics.rs
similarity index 100%
rename from polkadot/primitives/src/v5/metrics.rs
rename to polkadot/primitives/src/v6/metrics.rs
diff --git a/polkadot/primitives/src/v5/mod.rs b/polkadot/primitives/src/v6/mod.rs
similarity index 99%
rename from polkadot/primitives/src/v5/mod.rs
rename to polkadot/primitives/src/v6/mod.rs
index 81743225403..cf900835517 100644
--- a/polkadot/primitives/src/v5/mod.rs
+++ b/polkadot/primitives/src/v6/mod.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
 
-//! `V2` Primitives.
+//! `V6` Primitives.
 
 use bitvec::vec::BitVec;
 use parity_scale_codec::{Decode, Encode};
@@ -57,8 +57,13 @@ pub use sp_staking::SessionIndex;
 mod signed;
 pub use signed::{EncodeAs, Signed, UncheckedSigned};
 
+pub mod async_backing;
+pub mod executor_params;
 pub mod slashing;
 
+pub use async_backing::AsyncBackingParams;
+pub use executor_params::{ExecutorParam, ExecutorParams, ExecutorParamsHash};
+
 mod metrics;
 pub use metrics::{
 	metric_definitions, RuntimeMetricLabel, RuntimeMetricLabelValue, RuntimeMetricLabelValues,
@@ -1116,7 +1121,7 @@ pub struct AbridgedHostConfiguration {
 	/// The delay, in blocks, before a validation upgrade is applied.
 	pub validation_upgrade_delay: BlockNumber,
 	/// Asynchronous backing parameters.
-	pub async_backing_params: super::vstaging::AsyncBackingParams,
+	pub async_backing_params: AsyncBackingParams,
 }
 
 /// Abridged version of `HrmpChannel` (from the `Hrmp` parachains host runtime module) meant to be
@@ -1803,9 +1808,6 @@ pub enum PvfExecTimeoutKind {
 	Approval,
 }
 
-pub mod executor_params;
-pub use executor_params::{ExecutorParam, ExecutorParams, ExecutorParamsHash};
-
 #[cfg(test)]
 mod tests {
 	use super::*;
diff --git a/polkadot/primitives/src/v5/signed.rs b/polkadot/primitives/src/v6/signed.rs
similarity index 100%
rename from polkadot/primitives/src/v5/signed.rs
rename to polkadot/primitives/src/v6/signed.rs
diff --git a/polkadot/primitives/src/v5/slashing.rs b/polkadot/primitives/src/v6/slashing.rs
similarity index 100%
rename from polkadot/primitives/src/v5/slashing.rs
rename to polkadot/primitives/src/v6/slashing.rs
diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs
index ea341ee5b4f..1429b0c326a 100644
--- a/polkadot/primitives/src/vstaging/mod.rs
+++ b/polkadot/primitives/src/vstaging/mod.rs
@@ -17,121 +17,3 @@
 //! Staging Primitives.
 
 // Put any primitives used by staging APIs functions here
-pub use crate::v5::*;
-use sp_std::prelude::*;
-
-use parity_scale_codec::{Decode, Encode};
-use primitives::RuntimeDebug;
-use scale_info::TypeInfo;
-
-/// Useful type alias for Para IDs.
-pub type ParaId = Id;
-
-/// Candidate's acceptance limitations for asynchronous backing per relay parent.
-#[derive(
-	RuntimeDebug,
-	Copy,
-	Clone,
-	PartialEq,
-	Encode,
-	Decode,
-	TypeInfo,
-	serde::Serialize,
-	serde::Deserialize,
-)]
-
-pub struct AsyncBackingParams {
-	/// The maximum number of para blocks between the para head in a relay parent
-	/// and a new candidate. Restricts nodes from building arbitrary long chains
-	/// and spamming other validators.
-	///
-	/// When async backing is disabled, the only valid value is 0.
-	pub max_candidate_depth: u32,
-	/// How many ancestors of a relay parent are allowed to build candidates on top
-	/// of.
-	///
-	/// When async backing is disabled, the only valid value is 0.
-	pub allowed_ancestry_len: u32,
-}
-
-/// Constraints on inbound HRMP channels.
-#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)]
-pub struct InboundHrmpLimitations<N = BlockNumber> {
-	/// An exhaustive set of all valid watermarks, sorted ascending.
-	///
-	/// It's only expected to contain block numbers at which messages were
-	/// previously sent to a para, excluding most recent head.
-	pub valid_watermarks: Vec<N>,
-}
-
-/// Constraints on outbound HRMP channels.
-#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)]
-pub struct OutboundHrmpChannelLimitations {
-	/// The maximum bytes that can be written to the channel.
-	pub bytes_remaining: u32,
-	/// The maximum messages that can be written to the channel.
-	pub messages_remaining: u32,
-}
-
-/// Constraints on the actions that can be taken by a new parachain
-/// block. These limitations are implicitly associated with some particular
-/// parachain, which should be apparent from usage.
-#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)]
-pub struct Constraints<N = BlockNumber> {
-	/// The minimum relay-parent number accepted under these constraints.
-	pub min_relay_parent_number: N,
-	/// The maximum Proof-of-Validity size allowed, in bytes.
-	pub max_pov_size: u32,
-	/// The maximum new validation code size allowed, in bytes.
-	pub max_code_size: u32,
-	/// The amount of UMP messages remaining.
-	pub ump_remaining: u32,
-	/// The amount of UMP bytes remaining.
-	pub ump_remaining_bytes: u32,
-	/// The maximum number of UMP messages allowed per candidate.
-	pub max_ump_num_per_candidate: u32,
-	/// Remaining DMP queue. Only includes sent-at block numbers.
-	pub dmp_remaining_messages: Vec<N>,
-	/// The limitations of all registered inbound HRMP channels.
-	pub hrmp_inbound: InboundHrmpLimitations<N>,
-	/// The limitations of all registered outbound HRMP channels.
-	pub hrmp_channels_out: Vec<(ParaId, OutboundHrmpChannelLimitations)>,
-	/// The maximum number of HRMP messages allowed per candidate.
-	pub max_hrmp_num_per_candidate: u32,
-	/// The required parent head-data of the parachain.
-	pub required_parent: HeadData,
-	/// The expected validation-code-hash of this parachain.
-	pub validation_code_hash: ValidationCodeHash,
-	/// The code upgrade restriction signal as-of this parachain.
-	pub upgrade_restriction: Option<UpgradeRestriction>,
-	/// The future validation code hash, if any, and at what relay-parent
-	/// number the upgrade would be minimally applied.
-	pub future_validation_code: Option<(N, ValidationCodeHash)>,
-}
-
-/// A candidate pending availability.
-#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)]
-pub struct CandidatePendingAvailability<H = Hash, N = BlockNumber> {
-	/// The hash of the candidate.
-	pub candidate_hash: CandidateHash,
-	/// The candidate's descriptor.
-	pub descriptor: CandidateDescriptor<H>,
-	/// The commitments of the candidate.
-	pub commitments: CandidateCommitments,
-	/// The candidate's relay parent's number.
-	pub relay_parent_number: N,
-	/// The maximum Proof-of-Validity size allowed, in bytes.
-	pub max_pov_size: u32,
-}
-
-/// The per-parachain state of the backing system, including
-/// state-machine constraints and candidates pending availability.
-#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)]
-pub struct BackingState<H = Hash, N = BlockNumber> {
-	/// The state-machine constraints of the parachain.
-	pub constraints: Constraints<N>,
-	/// The candidates pending availability. These should be ordered, i.e. they should form
-	/// a sub-chain, where the first candidate builds on top of the required parent of the
-	/// constraints and each subsequent builds on top of the previous head-data.
-	pub pending_availability: Vec<CandidatePendingAvailability<H, N>>,
-}
diff --git a/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md b/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md
index a48444a46e4..286aeddb986 100644
--- a/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md
+++ b/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md
@@ -122,7 +122,7 @@ prospective validation data. This is unlikely to change.
 
 ### Outgoing
 
-- `RuntimeApiRequest::StagingParaBackingState`
+- `RuntimeApiRequest::ParaBackingState`
   - Gets the backing state of the given para (the constraints of the para and
     candidates pending availability).
 - `RuntimeApiRequest::AvailabilityCores`
diff --git a/polkadot/runtime/kusama/src/lib.rs b/polkadot/runtime/kusama/src/lib.rs
index 082e1aca375..1709c1bf8b1 100644
--- a/polkadot/runtime/kusama/src/lib.rs
+++ b/polkadot/runtime/kusama/src/lib.rs
@@ -46,7 +46,7 @@ use runtime_parachains::{
 	inclusion::{AggregateMessageOrigin, UmpQueueId},
 	initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras,
 	paras_inherent as parachains_paras_inherent, reward_points as parachains_reward_points,
-	runtime_api_impl::v5 as parachains_runtime_api_impl,
+	runtime_api_impl::v7 as parachains_runtime_api_impl,
 	scheduler as parachains_scheduler, session_info as parachains_session_info,
 	shared as parachains_shared,
 };
diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs
index fe9a4e52bd0..d07964b6916 100644
--- a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs
+++ b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs
@@ -28,7 +28,7 @@ use crate::{
 };
 use frame_support::{assert_noop, assert_ok, error::BadOrigin};
 use pallet_balances::Error as BalancesError;
-use primitives::{v5::ValidationCode, BlockNumber, SessionIndex};
+use primitives::{BlockNumber, SessionIndex, ValidationCode};
 use sp_std::collections::btree_map::BTreeMap;
 
 fn schedule_blank_para(id: ParaId, parakind: ParaKind) {
diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs
index 33039cd08ca..f53f986a553 100644
--- a/polkadot/runtime/parachains/src/configuration.rs
+++ b/polkadot/runtime/parachains/src/configuration.rs
@@ -26,7 +26,7 @@ use polkadot_parachain_primitives::primitives::{
 	MAX_HORIZONTAL_MESSAGE_NUM, MAX_UPWARD_MESSAGE_NUM,
 };
 use primitives::{
-	vstaging::AsyncBackingParams, Balance, ExecutorParams, SessionIndex, LEGACY_MIN_BACKING_VOTES,
+	AsyncBackingParams, Balance, ExecutorParams, SessionIndex, LEGACY_MIN_BACKING_VOTES,
 	MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE,
 };
 use sp_runtime::{traits::Zero, Perbill};
diff --git a/polkadot/runtime/parachains/src/configuration/migration/v6.rs b/polkadot/runtime/parachains/src/configuration/migration/v6.rs
index beed54deaff..19031a90bab 100644
--- a/polkadot/runtime/parachains/src/configuration/migration/v6.rs
+++ b/polkadot/runtime/parachains/src/configuration/migration/v6.rs
@@ -21,7 +21,7 @@ use frame_support::pallet_prelude::*;
 use frame_system::pallet_prelude::BlockNumberFor;
 use sp_std::vec::Vec;
 
-use primitives::{vstaging::AsyncBackingParams, Balance, ExecutorParams, SessionIndex};
+use primitives::{AsyncBackingParams, Balance, ExecutorParams, SessionIndex};
 #[cfg(feature = "try-runtime")]
 use sp_std::prelude::*;
 
diff --git a/polkadot/runtime/parachains/src/configuration/migration/v7.rs b/polkadot/runtime/parachains/src/configuration/migration/v7.rs
index 11365138120..1754b78e0a1 100644
--- a/polkadot/runtime/parachains/src/configuration/migration/v7.rs
+++ b/polkadot/runtime/parachains/src/configuration/migration/v7.rs
@@ -23,7 +23,7 @@ use frame_support::{
 	weights::Weight,
 };
 use frame_system::pallet_prelude::BlockNumberFor;
-use primitives::{vstaging::AsyncBackingParams, Balance, ExecutorParams, SessionIndex};
+use primitives::{AsyncBackingParams, Balance, ExecutorParams, SessionIndex};
 use sp_std::vec::Vec;
 
 use frame_support::traits::OnRuntimeUpgrade;
diff --git a/polkadot/runtime/parachains/src/configuration/migration/v8.rs b/polkadot/runtime/parachains/src/configuration/migration/v8.rs
index 5c5b3482183..d1bc9005112 100644
--- a/polkadot/runtime/parachains/src/configuration/migration/v8.rs
+++ b/polkadot/runtime/parachains/src/configuration/migration/v8.rs
@@ -24,8 +24,7 @@ use frame_support::{
 };
 use frame_system::pallet_prelude::BlockNumberFor;
 use primitives::{
-	vstaging::AsyncBackingParams, Balance, ExecutorParams, SessionIndex,
-	ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE,
+	AsyncBackingParams, Balance, ExecutorParams, SessionIndex, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE,
 };
 use sp_runtime::Perbill;
 use sp_std::vec::Vec;
diff --git a/polkadot/runtime/parachains/src/disputes/slashing.rs b/polkadot/runtime/parachains/src/disputes/slashing.rs
index b27a7ab1ad7..9b2b7a48dc8 100644
--- a/polkadot/runtime/parachains/src/disputes/slashing.rs
+++ b/polkadot/runtime/parachains/src/disputes/slashing.rs
@@ -51,7 +51,7 @@ use frame_support::{
 use frame_system::pallet_prelude::BlockNumberFor;
 
 use primitives::{
-	vstaging::slashing::{DisputeProof, DisputesTimeSlot, PendingSlashes, SlashingOffenceKind},
+	slashing::{DisputeProof, DisputesTimeSlot, PendingSlashes, SlashingOffenceKind},
 	CandidateHash, SessionIndex, ValidatorId, ValidatorIndex,
 };
 use scale_info::TypeInfo;
diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs b/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs
index e066ad825a3..ba74e488cd3 100644
--- a/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs
+++ b/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs
@@ -25,5 +25,6 @@
 //! 1. Bump the version of the stable module (e.g. `v2` becomes `v3`)
 //! 2. Move methods from `vstaging` to `v3`. The new stable version should include all methods from
 //!    `vstaging` tagged with the new version number (e.g. all `v3` methods).
-pub mod v5;
+
+pub mod v7;
 pub mod vstaging;
diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v5.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs
similarity index 79%
rename from polkadot/runtime/parachains/src/runtime_api_impl/v5.rs
rename to polkadot/runtime/parachains/src/runtime_api_impl/v7.rs
index 46a609e0368..35d92f71084 100644
--- a/polkadot/runtime/parachains/src/runtime_api_impl/v5.rs
+++ b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs
@@ -18,12 +18,16 @@
 //! functions.
 
 use crate::{
-	disputes, dmp, hrmp, inclusion, initializer, paras, paras_inherent,
+	configuration, disputes, dmp, hrmp, inclusion, initializer, paras, paras_inherent,
 	scheduler::{self, CoreOccupied},
 	session_info, shared,
 };
 use frame_system::pallet_prelude::*;
 use primitives::{
+	async_backing::{
+		AsyncBackingParams, BackingState, CandidatePendingAvailability, Constraints,
+		InboundHrmpLimitations, OutboundHrmpChannelLimitations,
+	},
 	slashing, AuthorityDiscoveryId, CandidateEvent, CandidateHash, CommittedCandidateReceipt,
 	CoreIndex, CoreState, DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash,
 	Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCore, OccupiedCoreAssumption,
@@ -395,3 +399,100 @@ pub fn submit_unsigned_slashing_report<T: disputes::slashing::Config>(
 		key_ownership_proof,
 	)
 }
+
+/// Return the min backing votes threshold from the configuration.
+pub fn minimum_backing_votes<T: initializer::Config>() -> u32 {
+	<configuration::Pallet<T>>::config().minimum_backing_votes
+}
+
+/// Implementation of the `para_backing_state` runtime API function.
+pub fn backing_state<T: initializer::Config>(
+	para_id: ParaId,
+) -> Option<BackingState<T::Hash, BlockNumberFor<T>>> {
+	let config = <configuration::Pallet<T>>::config();
+	// Async backing is only expected to be enabled with a tracker capacity of 1.
+	// A subsequent configuration update only takes effect at the start of a new
+	// session, which always clears the buffer.
+	//
+	// Thus, the minimum relay parent is ensured to have asynchronous backing enabled.
+	let now = <frame_system::Pallet<T>>::block_number();
+	let min_relay_parent_number = <shared::Pallet<T>>::allowed_relay_parents()
+		.hypothetical_earliest_block_number(now, config.async_backing_params.allowed_ancestry_len);
+
+	let required_parent = <paras::Pallet<T>>::para_head(para_id)?;
+	let validation_code_hash = <paras::Pallet<T>>::current_code_hash(para_id)?;
+
+	let upgrade_restriction = <paras::Pallet<T>>::upgrade_restriction_signal(para_id);
+	let future_validation_code =
+		<paras::Pallet<T>>::future_code_upgrade_at(para_id).and_then(|block_num| {
+			// Only read the storage if there's a pending upgrade.
+			Some(block_num).zip(<paras::Pallet<T>>::future_code_hash(para_id))
+		});
+
+	let (ump_msg_count, ump_total_bytes) =
+		<inclusion::Pallet<T>>::relay_dispatch_queue_size(para_id);
+	let ump_remaining = config.max_upward_queue_count - ump_msg_count;
+	let ump_remaining_bytes = config.max_upward_queue_size - ump_total_bytes;
+
+	let dmp_remaining_messages = <dmp::Pallet<T>>::dmq_contents(para_id)
+		.into_iter()
+		.map(|msg| msg.sent_at)
+		.collect();
+
+	let valid_watermarks = <hrmp::Pallet<T>>::valid_watermarks(para_id);
+	let hrmp_inbound = InboundHrmpLimitations { valid_watermarks };
+	let hrmp_channels_out = <hrmp::Pallet<T>>::outbound_remaining_capacity(para_id)
+		.into_iter()
+		.map(|(para, (messages_remaining, bytes_remaining))| {
+			(para, OutboundHrmpChannelLimitations { messages_remaining, bytes_remaining })
+		})
+		.collect();
+
+	let constraints = Constraints {
+		min_relay_parent_number,
+		max_pov_size: config.max_pov_size,
+		max_code_size: config.max_code_size,
+		ump_remaining,
+		ump_remaining_bytes,
+		max_ump_num_per_candidate: config.max_upward_message_num_per_candidate,
+		dmp_remaining_messages,
+		hrmp_inbound,
+		hrmp_channels_out,
+		max_hrmp_num_per_candidate: config.hrmp_max_message_num_per_candidate,
+		required_parent,
+		validation_code_hash,
+		upgrade_restriction,
+		future_validation_code,
+	};
+
+	let pending_availability = {
+		// Note: the API deals with a `Vec` as it is future-proof for cases
+		// where there may be multiple candidates pending availability at a time.
+		// But at the moment only one candidate can be pending availability per
+		// parachain.
+		crate::inclusion::PendingAvailability::<T>::get(&para_id)
+			.and_then(|pending| {
+				let commitments =
+					crate::inclusion::PendingAvailabilityCommitments::<T>::get(&para_id);
+				commitments.map(move |c| (pending, c))
+			})
+			.map(|(pending, commitments)| {
+				CandidatePendingAvailability {
+					candidate_hash: pending.candidate_hash(),
+					descriptor: pending.candidate_descriptor().clone(),
+					commitments,
+					relay_parent_number: pending.relay_parent_number(),
+					max_pov_size: constraints.max_pov_size, // assume always same in session.
+				}
+			})
+			.into_iter()
+			.collect()
+	};
+
+	Some(BackingState { constraints, pending_availability })
+}
+
+/// Implementation of the `async_backing_params` runtime API function.
+pub fn async_backing_params<T: configuration::Config>() -> AsyncBackingParams {
+	<configuration::Pallet<T>>::config().async_backing_params
+}
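
These two helpers are what a runtime's `ParachainHost` implementation forwards to (see the Kusama change above switching to `runtime_api_impl::v7`). A generic sketch of that forwarding; the crate and module paths are assumptions based on this diff, not copied from it:

```rust
use frame_system::pallet_prelude::BlockNumberFor;
use polkadot_primitives::{async_backing, AsyncBackingParams, Id as ParaId};
use polkadot_runtime_parachains::{configuration, initializer, runtime_api_impl::v7};

/// What `fn para_backing_state(..)` in a runtime's `ParachainHost` impl boils down to.
pub fn para_backing_state<T: initializer::Config>(
	para_id: ParaId,
) -> Option<async_backing::BackingState<T::Hash, BlockNumberFor<T>>> {
	v7::backing_state::<T>(para_id)
}

/// Likewise for `fn async_backing_params()`.
pub fn async_backing_params<T: configuration::Config>() -> AsyncBackingParams {
	v7::async_backing_params::<T>()
}
```
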
diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs
index deef19d9071..d01b543630c 100644
--- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs
+++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs
@@ -15,111 +15,3 @@
 // along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
 
 //! Put implementations of functions from staging APIs here.
-
-use crate::{configuration, dmp, hrmp, inclusion, initializer, paras, shared};
-use frame_system::pallet_prelude::BlockNumberFor;
-use primitives::{
-	vstaging::{
-		AsyncBackingParams, BackingState, CandidatePendingAvailability, Constraints,
-		InboundHrmpLimitations, OutboundHrmpChannelLimitations,
-	},
-	Id as ParaId,
-};
-use sp_std::prelude::*;
-
-/// Implementation for `StagingParaBackingState` function from the runtime API
-pub fn backing_state<T: initializer::Config>(
-	para_id: ParaId,
-) -> Option<BackingState<T::Hash, BlockNumberFor<T>>> {
-	let config = <configuration::Pallet<T>>::config();
-	// Async backing is only expected to be enabled with a tracker capacity of 1.
-	// Subsequent configuration update gets applied on new session, which always
-	// clears the buffer.
-	//
-	// Thus, minimum relay parent is ensured to have asynchronous backing enabled.
-	let now = <frame_system::Pallet<T>>::block_number();
-	let min_relay_parent_number = <shared::Pallet<T>>::allowed_relay_parents()
-		.hypothetical_earliest_block_number(now, config.async_backing_params.allowed_ancestry_len);
-
-	let required_parent = <paras::Pallet<T>>::para_head(para_id)?;
-	let validation_code_hash = <paras::Pallet<T>>::current_code_hash(para_id)?;
-
-	let upgrade_restriction = <paras::Pallet<T>>::upgrade_restriction_signal(para_id);
-	let future_validation_code =
-		<paras::Pallet<T>>::future_code_upgrade_at(para_id).and_then(|block_num| {
-			// Only read the storage if there's a pending upgrade.
-			Some(block_num).zip(<paras::Pallet<T>>::future_code_hash(para_id))
-		});
-
-	let (ump_msg_count, ump_total_bytes) =
-		<inclusion::Pallet<T>>::relay_dispatch_queue_size(para_id);
-	let ump_remaining = config.max_upward_queue_count - ump_msg_count;
-	let ump_remaining_bytes = config.max_upward_queue_size - ump_total_bytes;
-
-	let dmp_remaining_messages = <dmp::Pallet<T>>::dmq_contents(para_id)
-		.into_iter()
-		.map(|msg| msg.sent_at)
-		.collect();
-
-	let valid_watermarks = <hrmp::Pallet<T>>::valid_watermarks(para_id);
-	let hrmp_inbound = InboundHrmpLimitations { valid_watermarks };
-	let hrmp_channels_out = <hrmp::Pallet<T>>::outbound_remaining_capacity(para_id)
-		.into_iter()
-		.map(|(para, (messages_remaining, bytes_remaining))| {
-			(para, OutboundHrmpChannelLimitations { messages_remaining, bytes_remaining })
-		})
-		.collect();
-
-	let constraints = Constraints {
-		min_relay_parent_number,
-		max_pov_size: config.max_pov_size,
-		max_code_size: config.max_code_size,
-		ump_remaining,
-		ump_remaining_bytes,
-		max_ump_num_per_candidate: config.max_upward_message_num_per_candidate,
-		dmp_remaining_messages,
-		hrmp_inbound,
-		hrmp_channels_out,
-		max_hrmp_num_per_candidate: config.hrmp_max_message_num_per_candidate,
-		required_parent,
-		validation_code_hash,
-		upgrade_restriction,
-		future_validation_code,
-	};
-
-	let pending_availability = {
-		// Note: the API deals with a `Vec` as it is future-proof for cases
-		// where there may be multiple candidates pending availability at a time.
-		// But at the moment only one candidate can be pending availability per
-		// parachain.
-		crate::inclusion::PendingAvailability::<T>::get(&para_id)
-			.and_then(|pending| {
-				let commitments =
-					crate::inclusion::PendingAvailabilityCommitments::<T>::get(&para_id);
-				commitments.map(move |c| (pending, c))
-			})
-			.map(|(pending, commitments)| {
-				CandidatePendingAvailability {
-					candidate_hash: pending.candidate_hash(),
-					descriptor: pending.candidate_descriptor().clone(),
-					commitments,
-					relay_parent_number: pending.relay_parent_number(),
-					max_pov_size: constraints.max_pov_size, // assume always same in session.
-				}
-			})
-			.into_iter()
-			.collect()
-	};
-
-	Some(BackingState { constraints, pending_availability })
-}
-
-/// Implementation for `StagingAsyncBackingParams` function from the runtime API
-pub fn async_backing_params<T: configuration::Config>() -> AsyncBackingParams {
-	<configuration::Pallet<T>>::config().async_backing_params
-}
-
-/// Return the min backing votes threshold from the configuration.
-pub fn minimum_backing_votes<T: initializer::Config>() -> u32 {
-	<configuration::Pallet<T>>::config().minimum_backing_votes
-}
diff --git a/polkadot/runtime/polkadot/src/lib.rs b/polkadot/runtime/polkadot/src/lib.rs
index c9e3ded6dad..0b2dd12b154 100644
--- a/polkadot/runtime/polkadot/src/lib.rs
+++ b/polkadot/runtime/polkadot/src/lib.rs
@@ -34,7 +34,7 @@ use runtime_parachains::{
 	inclusion::{AggregateMessageOrigin, UmpQueueId},
 	initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras,
 	paras_inherent as parachains_paras_inherent, reward_points as parachains_reward_points,
-	runtime_api_impl::v5 as parachains_runtime_api_impl,
+	runtime_api_impl::v7 as parachains_runtime_api_impl,
 	scheduler as parachains_scheduler, session_info as parachains_session_info,
 	shared as parachains_shared,
 };
diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs
index c7d429bc99a..dd05082d291 100644
--- a/polkadot/runtime/rococo/src/lib.rs
+++ b/polkadot/runtime/rococo/src/lib.rs
@@ -46,7 +46,7 @@ use runtime_parachains::{
 	inclusion::{AggregateMessageOrigin, UmpQueueId},
 	initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras,
 	paras_inherent as parachains_paras_inherent,
-	runtime_api_impl::v5 as parachains_runtime_api_impl,
+	runtime_api_impl::v7 as parachains_runtime_api_impl,
 	scheduler as parachains_scheduler, session_info as parachains_session_info,
 	shared as parachains_shared,
 };
@@ -1724,6 +1724,7 @@ sp_api::impl_runtime_apis! {
 		}
 	}
 
+	#[api_version(7)]
 	impl primitives::runtime_api::ParachainHost<Block, Hash, BlockNumber> for Runtime {
 		fn validators() -> Vec<ValidatorId> {
 			parachains_runtime_api_impl::validators::<Runtime>()
@@ -1854,6 +1855,18 @@ sp_api::impl_runtime_apis! {
 				key_ownership_proof,
 			)
 		}
+
+		fn minimum_backing_votes() -> u32 {
+			parachains_runtime_api_impl::minimum_backing_votes::<Runtime>()
+		}
+
+		fn para_backing_state(para_id: ParaId) -> Option<primitives::async_backing::BackingState> {
+			parachains_runtime_api_impl::backing_state::<Runtime>(para_id)
+		}
+
+		fn async_backing_params() -> primitives::AsyncBackingParams {
+			parachains_runtime_api_impl::async_backing_params::<Runtime>()
+		}
 	}
 
 	#[api_version(3)]
diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs
index 99d6e58bc40..99fd2198400 100644
--- a/polkadot/runtime/test-runtime/src/lib.rs
+++ b/polkadot/runtime/test-runtime/src/lib.rs
@@ -30,7 +30,7 @@ use polkadot_runtime_parachains::{
 	disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp,
 	inclusion as parachains_inclusion, initializer as parachains_initializer,
 	origin as parachains_origin, paras as parachains_paras,
-	paras_inherent as parachains_paras_inherent, runtime_api_impl::v5 as runtime_impl,
+	paras_inherent as parachains_paras_inherent, runtime_api_impl::v7 as runtime_impl,
 	scheduler as parachains_scheduler, session_info as parachains_session_info,
 	shared as parachains_shared,
 };
diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index ad08360f382..e7cae7248bd 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -65,9 +65,7 @@ use runtime_parachains::{
 	inclusion::{AggregateMessageOrigin, UmpQueueId},
 	initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras,
 	paras_inherent as parachains_paras_inherent, reward_points as parachains_reward_points,
-	runtime_api_impl::{
-		v5 as parachains_runtime_api_impl, vstaging as parachains_staging_runtime_api_impl,
-	},
+	runtime_api_impl::v7 as parachains_runtime_api_impl,
 	scheduler as parachains_scheduler, session_info as parachains_session_info,
 	shared as parachains_shared,
 };
@@ -1582,7 +1580,7 @@ sp_api::impl_runtime_apis! {
 		}
 	}
 
-	#[api_version(6)]
+	#[api_version(7)]
 	impl primitives::runtime_api::ParachainHost<Block, Hash, BlockNumber> for Runtime {
 		fn validators() -> Vec<ValidatorId> {
 			parachains_runtime_api_impl::validators::<Runtime>()
@@ -1715,7 +1713,15 @@ sp_api::impl_runtime_apis! {
 		}
 
 		fn minimum_backing_votes() -> u32 {
-			parachains_staging_runtime_api_impl::minimum_backing_votes::<Runtime>()
+			parachains_runtime_api_impl::minimum_backing_votes::<Runtime>()
+		}
+
+		fn para_backing_state(para_id: ParaId) -> Option<primitives::async_backing::BackingState> {
+			parachains_runtime_api_impl::backing_state::<Runtime>(para_id)
+		}
+
+		fn async_backing_params() -> primitives::AsyncBackingParams {
+			parachains_runtime_api_impl::async_backing_params::<Runtime>()
 		}
 	}
 
diff --git a/polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.toml b/polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.toml
deleted file mode 100644
index 918fb5bf4f6..00000000000
--- a/polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.toml
+++ /dev/null
@@ -1,34 +0,0 @@
-[settings]
-timeout = 1000
-
-[relaychain]
-default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
-chain = "rococo-local"
-default_command = "polkadot"
-
-  [relaychain.default_resources]
-  limits = { memory = "4G", cpu = "2" }
-  requests = { memory = "2G", cpu = "1" }
-
-  [[relaychain.nodes]]
-  name = "alice"
-  args = [ "-lparachain=debug,runtime=debug"]
-
-  [[relaychain.nodes]]
-  name = "bob"
-  image = "{{ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE}}"
-  args = [ "-lparachain=debug,runtime=debug"]
-
-[[parachains]]
-id = 100
-
-  [parachains.collator]
-  name = "collator01"
-  image = "{{COL_IMAGE}}"
-  command = "undying-collator"
-  args = ["-lparachain=debug"]
-
-[types.Header]
-number = "u64"
-parent_hash = "Hash"
-post_state = "Hash"
diff --git a/polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.zndsl b/polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.zndsl
deleted file mode 100644
index 46c1d77acf4..00000000000
--- a/polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.zndsl
+++ /dev/null
@@ -1,23 +0,0 @@
-Description: Async Backing Compatibility Test
-Network: ./001-async-backing-compatibility.toml
-Creds: config
-
-# General
-alice: is up
-bob: is up
-
-# Check authority status 
-alice: reports node_roles is 4
-bob: reports node_roles is 4
-
-# Check peers
-alice: reports peers count is at least 2 within 20 seconds
-bob: reports peers count is at least 2 within 20 seconds
-
-# Parachain registration
-alice: parachain 100 is registered within 225 seconds
-bob: parachain 100 is registered within 225 seconds
-
-# Ensure parachain progress
-alice: parachain 100 block height is at least 10 within 250 seconds
-bob: parachain 100 block height is at least 10 within 250 seconds
diff --git a/polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.toml b/polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.toml
deleted file mode 100644
index e61f7dd47ef..00000000000
--- a/polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.toml
+++ /dev/null
@@ -1,54 +0,0 @@
-[settings]
-timeout = 1000
-
-[relaychain]
-default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
-chain = "rococo-local"
-default_command = "polkadot"
-
-  [relaychain.default_resources]
-  limits = { memory = "4G", cpu = "2" }
-  requests = { memory = "2G", cpu = "1" }
-
-  [[relaychain.nodes]]
-  name = "alice"
-  args = [ "-lparachain=debug,runtime=debug"]
-
-  [[relaychain.nodes]]
-  name = "bob"
-  args = [ "-lparachain=debug,runtime=debug"]
-
-  [[relaychain.nodes]]
-  name = "charlie"
-  image = "{{ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE}}"
-  args = [ "-lparachain=debug,runtime=debug"]
-
-  [[relaychain.nodes]]
-  name = "dave"
-  image = "{{ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE}}"
-  args = [ "-lparachain=debug,runtime=debug"]
-
-[[parachains]]
-id = 100
-addToGenesis = true
-
-  [parachains.collator]
-  name = "collator02"
-  image = "{{COL_IMAGE}}"
-  command = "undying-collator"
-  args = ["-lparachain=debug"]
-
-[[parachains]]
-id = 101
-addToGenesis = true
-
-  [parachains.collator]
-  name = "collator02"
-  image = "{{COL_IMAGE}}"
-  command = "undying-collator"
-  args = ["-lparachain=debug"]
-
-[types.Header]
-number = "u64"
-parent_hash = "Hash"
-post_state = "Hash"
diff --git a/polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.zndsl b/polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.zndsl
deleted file mode 100644
index 6213d1afb81..00000000000
--- a/polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.zndsl
+++ /dev/null
@@ -1,34 +0,0 @@
-Description: Async Backing Runtime Upgrade Test
-Network: ./002-async-backing-runtime-upgrade.toml
-Creds: config
-
-# General
-alice: is up
-bob: is up
-charlie: is up
-dave: is up
-
-# Check peers
-alice: reports peers count is at least 3 within 20 seconds
-bob: reports peers count is at least 3 within 20 seconds
-
-# Parachain registration
-alice: parachain 100 is registered within 225 seconds
-bob: parachain 100 is registered within 225 seconds
-charlie: parachain 100 is registered within 225 seconds
-dave: parachain 100 is registered within 225 seconds
-alice: parachain 101 is registered within 225 seconds
-bob: parachain 101 is registered within 225 seconds
-charlie: parachain 101 is registered within 225 seconds
-dave: parachain 101 is registered within 225 seconds
-
-# Ensure parachain progress
-alice: parachain 100 block height is at least 10 within 250 seconds
-bob: parachain 100 block height is at least 10 within 250 seconds
-
-# Runtime upgrade (according to previous runtime tests, avg. is 30s)
-alice: run ../misc/0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_BIN_URL}}" within 40 seconds
-bob: run ../misc/0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_BIN_URL}}" within 40 seconds
-
-# Bootstrap the runtime upgrade
-sleep 30 seconds
diff --git a/polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.toml b/polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.toml
deleted file mode 100644
index 4dca4d3d531..00000000000
--- a/polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.toml
+++ /dev/null
@@ -1,40 +0,0 @@
-[settings]
-timeout = 1000
-
-[relaychain]
-default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
-chain = "rococo-local"
-default_command = "polkadot"
-
-  [relaychain.default_resources]
-  limits = { memory = "4G", cpu = "2" }
-  requests = { memory = "2G", cpu = "1" }
-
-  [[relaychain.nodes]]
-  name = "alice"
-  args = [ "-lparachain=debug"]
-
-  [[relaychain.nodes]]
-  name = "bob"
-  image = "{{ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE}}"
-  args = [ "-lparachain=debug"]
-
-[[parachains]]
-id = 100
-
-  [[parachains.collators]]
-  name = "collator01"
-  image = "docker.io/paritypr/colander:master"
-  command = "undying-collator"
-  args = ["-lparachain=debug"]
-
-  [[parachains.collators]]
-  name = "collator02"
-  image = "{{COL_IMAGE}}"
-  command = "undying-collator"
-  args = ["-lparachain=debug"]
-
-[types.Header]
-number = "u64"
-parent_hash = "Hash"
-post_state = "Hash"
diff --git a/polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.zndsl b/polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.zndsl
deleted file mode 100644
index 98436b0459c..00000000000
--- a/polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.zndsl
+++ /dev/null
@@ -1,19 +0,0 @@
-Description: Async Backing Collator Mix Test
-Network: ./003-async-backing-collator-mix.toml
-Creds: config
-
-# General
-alice: is up
-bob: is up
-
-# Check peers
-alice: reports peers count is at least 3 within 20 seconds
-bob: reports peers count is at least 3 within 20 seconds
-
-# Parachain registration
-alice: parachain 100 is registered within 225 seconds
-bob: parachain 100 is registered within 225 seconds
-
-# Ensure parachain progress
-alice: parachain 100 block height is at least 10 within 250 seconds
-bob: parachain 100 block height is at least 10 within 250 seconds
diff --git a/polkadot/zombienet_tests/async_backing/README.md b/polkadot/zombienet_tests/async_backing/README.md
deleted file mode 100644
index 9774ea3c25c..00000000000
--- a/polkadot/zombienet_tests/async_backing/README.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# async-backing zombienet tests
-
-This directory contains zombienet tests made explicitly for the async-backing feature branch.
-
-## coverage
-
-- Network protocol upgrade deploying both master and async branch (compatibility).
-- Runtime ugprade while running both master and async backing branch nodes.
-- Async backing test with a mix of collators collating via async backing and sync backing.
diff --git a/polkadot/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.toml b/polkadot/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.toml
index 0becb408550..d72e3ebdb33 100644
--- a/polkadot/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.toml
+++ b/polkadot/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.toml
@@ -30,8 +30,8 @@ cumulus_based = true
 
   [parachains.collator]
   name = "collator01"
-  image = "{{COL_IMAGE}}"
-  command = "polkadot-collator"
+  image = "{{CUMULUS_IMAGE}}"
+  command = "polkadot-parachain"
 
   [[parachains.collator.env]]
   name = "RUST_LOG"
-- 
GitLab