From 5988794ac62a2d582857c0277cd0a0b592b4e661 Mon Sep 17 00:00:00 2001
From: Kian Paimani <5588131+kianenigma@users.noreply.github.com>
Date: Fri, 14 Feb 2025 23:47:22 +0000
Subject: [PATCH] [AHM] Multi-block staking election pallet (#7282)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## Multi Block Election Pallet

This PR adds the first iteration of the multi-block staking pallet.

From this point onwards, the staking and its election provider pallets
are being customized to work in AssetHub. While usage in solo-chains is
still possible, it is no longer the main focus of this pallet. For
safer usage, please fork and use an older version of this pallet.

---

## Replaces

- [x] https://github.com/paritytech/polkadot-sdk/pull/6034
- [x] https://github.com/paritytech/polkadot-sdk/pull/5272

## Related PRs:

- [x] https://github.com/paritytech/polkadot-sdk/pull/7483
- [ ] https://github.com/paritytech/polkadot-sdk/pull/7357
- [ ] https://github.com/paritytech/polkadot-sdk/pull/7424
- [ ] https://github.com/paritytech/polkadot-staking-miner/pull/955

This branch can be periodically merged into
https://github.com/paritytech/polkadot-sdk/pull/7358 ->
https://github.com/paritytech/polkadot-sdk/pull/6996

## TODOs:

- [x] rebase to master
- Benchmarking for staking critical path
  - [x] snapshot
  - [x] election result
- Benchmarking for EPMB critical path
  - [x] snapshot
  - [x] verification
  - [x] submission
  - [x] unsigned submission
  - [ ] election results fetching
- [ ] Fix deletion weights. Either of
  - [ ] Garbage collector + lazy removal of all paged storage items
  - [ ] Confirm that deletion is small PoV footprint.
- [ ] Move election prediction to be push based. @tdimitrov
- [ ] integrity checks for bounds
- [ ] Properly benchmark this as part of CI -- for now I will remove
these benchmarks as they are too slow
- [x] add try-state to all pallets
- [x] Staking to allow genesis dev accounts to be created internally
- [x] Decouple miner config so @niklasad1 can work on the miner
72841b731727e69db38f9bd616190aa8d50a56ba
- [x] duplicate snapshot page reported by @niklasad1
- [ ] https://github.com/paritytech/polkadot-sdk/pull/6520 or equivalent
-- during snapshot, `VoterList` must be locked
- [ ] Move target snapshot to a separate block

---------

Co-authored-by: Gonçalo Pestana <g6pestana@gmail.com>
Co-authored-by: Ankan <10196091+Ank4n@users.noreply.github.com>
Co-authored-by: command-bot <>
Co-authored-by: Guillaume Thiolliere <gui.thiolliere@gmail.com>
Co-authored-by: Giuseppe Re <giuseppe.re@parity.io>
Co-authored-by: cmd[bot] <41898282+github-actions[bot]@users.noreply.github.com>
---
 .github/workflows/runtimes-matrix.json        |    2 +-
 .github/workflows/tests.yml                   |    2 +-
 Cargo.lock                                    |   28 +
 Cargo.toml                                    |    1 +
 .../chains/relays/westend/src/genesis.rs      |   10 +-
 polkadot/runtime/common/src/try_runtime.rs    |    2 +-
 .../src/disputes/slashing/benchmarking.rs     |    4 +
 polkadot/runtime/test-runtime/src/lib.rs      |   11 +-
 .../westend/src/genesis_config_presets.rs     |   12 +-
 polkadot/runtime/westend/src/lib.rs           |   23 +-
 .../src/weights/pallet_fast_unstake.rs        |    2 -
 .../westend/src/weights/pallet_staking.rs     |   67 +-
 prdoc/pr_6034.prdoc                           |   25 +
 prdoc/pr_6689.prdoc                           |    7 +-
 prdoc/pr_7042.prdoc                           |    4 +-
 prdoc/pr_7282.prdoc                           |   72 +
 .../frame-umbrella-weight-template.hbs        |   17 +
 substrate/.maintain/frame-weight-template.hbs |   17 +
 substrate/bin/node/cli/Cargo.toml             |    3 +
 substrate/bin/node/cli/src/chain_spec.rs      |   32 +-
 .../cli/tests/res/default_genesis_config.json |    1 +
 substrate/bin/node/runtime/Cargo.toml         |    6 +
 substrate/bin/node/runtime/src/constants.rs   |    3 +-
 substrate/bin/node/runtime/src/lib.rs         |  333 ++-
 substrate/bin/node/testing/src/genesis.rs     |    5 +-
 substrate/frame/babe/src/mock.rs              |   10 +-
 .../bags-list/remote-tests/src/snapshot.rs    |    8 +-
 substrate/frame/bags-list/src/benchmarks.rs   |  119 +
 substrate/frame/bags-list/src/lib.rs          |    2 +-
 substrate/frame/bags-list/src/list/mod.rs     |   11 +-
 substrate/frame/beefy/Cargo.toml              |    1 +
 substrate/frame/beefy/src/mock.rs             |   15 +-
 substrate/frame/beefy/src/tests.rs            |    2 +
 substrate/frame/benchmarking/src/lib.rs       |    2 +-
 substrate/frame/delegated-staking/src/mock.rs |   10 +-
 .../election-provider-multi-block/Cargo.toml  |   84 +
 .../src/benchmarking.rs                       |  170 ++
 .../src/helpers.rs                            |  227 ++
 .../election-provider-multi-block/src/lib.rs  | 2556 +++++++++++++++++
 .../src/mock/mod.rs                           |  700 +++++
 .../src/mock/signed.rs                        |  255 ++
 .../src/mock/staking.rs                       |  238 ++
 .../src/mock/weight_info.rs                   |   85 +
 .../src/signed/benchmarking.rs                |  171 ++
 .../src/signed/mod.rs                         |  858 ++++++
 .../src/signed/tests.rs                       |  554 ++++
 .../src/types.rs                              |  363 +++
 .../src/unsigned/benchmarking.rs              |   79 +
 .../src/unsigned/miner.rs                     | 1972 +++++++++++++
 .../src/unsigned/mod.rs                       |  633 ++++
 .../src/verifier/benchmarking.rs              |  234 ++
 .../src/verifier/impls.rs                     |  955 ++++++
 .../src/verifier/mod.rs                       |  271 ++
 .../src/verifier/tests.rs                     | 1266 ++++++++
 .../src/weights/measured/mod.rs               |   21 +
 .../pallet_election_provider_multi_block.rs   |  364 +++
 ...et_election_provider_multi_block_signed.rs |  272 ++
 ..._election_provider_multi_block_unsigned.rs |  153 +
 ..._election_provider_multi_block_verifier.rs |  361 +++
 .../src/weights/mel/mod.rs                    |   21 +
 .../pallet_election_provider_multi_block.rs   |  362 +++
 ...et_election_provider_multi_block_signed.rs |  270 ++
 ..._election_provider_multi_block_unsigned.rs |  151 +
 ..._election_provider_multi_block_verifier.rs |  359 +++
 .../src/weights/mod.rs                        |   22 +
 .../src/weights/zero.rs                       |   89 +
 .../src/benchmarking.rs                       |   21 +-
 .../election-provider-multi-phase/src/lib.rs  |  315 +-
 .../election-provider-multi-phase/src/mock.rs |   62 +-
 .../src/signed.rs                             |    8 +-
 .../src/unsigned.rs                           |  285 +-
 .../test-staking-e2e/src/mock.rs              |   19 +-
 .../election-provider-support/Cargo.toml      |    3 +
 .../benchmarking/src/inner.rs                 |    2 +-
 .../solution-type/fuzzer/src/compact.rs       |    3 +-
 .../solution-type/src/codec.rs                |    1 +
 .../solution-type/src/single_page.rs          |   90 +-
 .../election-provider-support/src/bounds.rs   |   10 +
 .../election-provider-support/src/lib.rs      |  622 +++-
 .../election-provider-support/src/onchain.rs  |  315 +-
 .../election-provider-support/src/tests.rs    |   30 +-
 .../election-provider-support/src/traits.rs   |   23 +
 .../elections-phragmen/src/benchmarking.rs    |   12 +-
 substrate/frame/fast-unstake/src/mock.rs      |   26 +-
 substrate/frame/grandpa/Cargo.toml            |    1 +
 substrate/frame/grandpa/src/mock.rs           |   14 +-
 .../nomination-pools/benchmarking/src/mock.rs |    2 +-
 .../test-delegate-stake/src/mock.rs           |    2 +-
 .../frame/offences/benchmarking/src/mock.rs   |    5 +-
 substrate/frame/root-offences/src/mock.rs     |   10 +-
 .../frame/session/benchmarking/src/inner.rs   |    2 +
 .../frame/session/benchmarking/src/mock.rs    |    5 +-
 substrate/frame/session/src/lib.rs            |   11 +-
 substrate/frame/staking/Cargo.toml            |    9 +-
 substrate/frame/staking/src/benchmarking.rs   |  316 +-
 substrate/frame/staking/src/lib.rs            |  259 +-
 substrate/frame/staking/src/migrations.rs     |   33 +-
 substrate/frame/staking/src/mock.rs           |  135 +-
 substrate/frame/staking/src/pallet/impls.rs   |  655 +++--
 substrate/frame/staking/src/pallet/mod.rs     |  417 ++-
 substrate/frame/staking/src/slashing.rs       |   16 +-
 substrate/frame/staking/src/tests.rs          |  616 ++--
 .../frame/staking/src/tests_paged_election.rs |  971 +++++++
 substrate/frame/staking/src/weights.rs        |  136 +-
 .../construct_runtime/expand/outer_enums.rs   |    2 +-
 .../procedural/src/pallet/expand/event.rs     |    2 +-
 substrate/frame/support/src/lib.rs            |    1 +
 .../deprecated_where_block.stderr             |   16 +-
 .../primitives/npos-elections/src/helpers.rs  |   28 +-
 .../primitives/npos-elections/src/lib.rs      |   71 +-
 .../primitives/npos-elections/src/phragmen.rs |    4 +-
 .../primitives/npos-elections/src/phragmms.rs |    2 +-
 substrate/primitives/staking/src/lib.rs       |  192 +-
 substrate/primitives/staking/src/offence.rs   |   12 +-
 .../benchmarking-cli/src/pallet/command.rs    |    6 +-
 .../frame/benchmarking-cli/src/pallet/mod.rs  |    2 +-
 umbrella/Cargo.toml                           |   10 +-
 umbrella/src/lib.rs                           |    4 +
 118 files changed, 19048 insertions(+), 1751 deletions(-)
 create mode 100644 prdoc/pr_6034.prdoc
 create mode 100644 prdoc/pr_7282.prdoc
 create mode 100644 substrate/frame/election-provider-multi-block/Cargo.toml
 create mode 100644 substrate/frame/election-provider-multi-block/src/benchmarking.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/helpers.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/lib.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/mock/mod.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/mock/signed.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/mock/staking.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/mock/weight_info.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/signed/benchmarking.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/signed/mod.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/signed/tests.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/types.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/unsigned/benchmarking.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/unsigned/miner.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/unsigned/mod.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/verifier/benchmarking.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/verifier/impls.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/verifier/mod.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/verifier/tests.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/weights/measured/mod.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_signed.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_unsigned.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_verifier.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/weights/mel/mod.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block_signed.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block_unsigned.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block_verifier.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/weights/mod.rs
 create mode 100644 substrate/frame/election-provider-multi-block/src/weights/zero.rs
 create mode 100644 substrate/frame/staking/src/tests_paged_election.rs

diff --git a/.github/workflows/runtimes-matrix.json b/.github/workflows/runtimes-matrix.json
index 5f981565377..8999491a20a 100644
--- a/.github/workflows/runtimes-matrix.json
+++ b/.github/workflows/runtimes-matrix.json
@@ -6,7 +6,7 @@
     "header": "substrate/HEADER-APACHE2",
     "template": "substrate/.maintain/frame-weight-template.hbs",
     "bench_features": "runtime-benchmarks",
-    "bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic,pallet_nomination_pools,pallet_remark,pallet_transaction_storage",
+    "bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic,pallet_nomination_pools,pallet_remark,pallet_transaction_storage,pallet_election_provider_multi_block,pallet_election_provider_multi_block::signed,pallet_election_provider_multi_block::unsigned,pallet_election_provider_multi_block::verifier",
     "uri": null,
     "is_relay": false
   },
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 6d6e393b041..ba0574b51e6 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -32,7 +32,7 @@ jobs:
       - name: Checkout
         uses: actions/checkout@v4
       - name: script
-        run: forklift cargo run --locked --release -p staging-node-cli --bin substrate-node --features runtime-benchmarks --quiet -- benchmark pallet --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 --quiet
+        run: forklift cargo run --locked --release -p staging-node-cli --bin substrate-node --features runtime-benchmarks --quiet -- benchmark pallet --chain dev --pallet "*" --exclude-pallets=pallet_election_provider_multi_block,pallet_election_provider_multi_block::signed,pallet_election_provider_multi_block::unsigned,pallet_election_provider_multi_block::verifier --extrinsic "*" --steps 2 --repeat 1 --quiet
 
   # cf https://github.com/paritytech/polkadot-sdk/issues/1652
   test-syscalls:
diff --git a/Cargo.lock b/Cargo.lock
index cd323584c9d..83ce22d1cc9 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -7324,6 +7324,7 @@ dependencies = [
  "sp-io 30.0.0",
  "sp-npos-elections 26.0.0",
  "sp-runtime 31.0.1",
+ "sp-std 14.0.0",
 ]
 
 [[package]]
@@ -12465,6 +12466,7 @@ dependencies = [
  "sp-session 27.0.0",
  "sp-staking 26.0.0",
  "sp-state-machine 0.35.0",
+ "sp-tracing 16.0.0",
 ]
 
 [[package]]
@@ -13376,6 +13378,29 @@ dependencies = [
  "sp-tracing 16.0.0",
 ]
 
+[[package]]
+name = "pallet-election-provider-multi-block"
+version = "0.9.0"
+dependencies = [
+ "frame-benchmarking 28.0.0",
+ "frame-election-provider-support 28.0.0",
+ "frame-support 28.0.0",
+ "frame-system 28.0.0",
+ "log",
+ "pallet-balances 28.0.0",
+ "parity-scale-codec",
+ "parking_lot 0.12.3",
+ "rand",
+ "scale-info",
+ "sp-arithmetic 23.0.0",
+ "sp-core 28.0.0",
+ "sp-io 30.0.0",
+ "sp-npos-elections 26.0.0",
+ "sp-runtime 31.0.1",
+ "sp-std 14.0.0",
+ "sp-tracing 16.0.0",
+]
+
 [[package]]
 name = "pallet-election-provider-multi-phase"
 version = "27.0.0"
@@ -13765,6 +13790,7 @@ dependencies = [
  "sp-runtime 31.0.1",
  "sp-session 27.0.0",
  "sp-staking 26.0.0",
+ "sp-tracing 16.0.0",
 ]
 
 [[package]]
@@ -15453,6 +15479,7 @@ dependencies = [
  "pallet-staking-reward-curve",
  "pallet-timestamp 27.0.0",
  "parity-scale-codec",
+ "rand",
  "rand_chacha",
  "scale-info",
  "serde",
@@ -18760,6 +18787,7 @@ dependencies = [
  "pallet-delegated-staking 1.0.0",
  "pallet-democracy 28.0.0",
  "pallet-dev-mode 10.0.0",
+ "pallet-election-provider-multi-block",
  "pallet-election-provider-multi-phase 27.0.0",
  "pallet-election-provider-support-benchmarking 27.0.0",
  "pallet-elections-phragmen 29.0.0",
diff --git a/Cargo.toml b/Cargo.toml
index 2b635d5966b..ba8af51c3a5 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -344,6 +344,7 @@ members = [
 	"substrate/frame/core-fellowship",
 	"substrate/frame/delegated-staking",
 	"substrate/frame/democracy",
+	"substrate/frame/election-provider-multi-block",
 	"substrate/frame/election-provider-multi-phase",
 	"substrate/frame/election-provider-multi-phase/test-staking-e2e",
 	"substrate/frame/election-provider-support",
diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs
index 2f02ca5f193..4dc45cf7aec 100644
--- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs
+++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs
@@ -19,7 +19,7 @@ use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId;
 use sp_consensus_babe::AuthorityId as BabeId;
 use sp_consensus_beefy::ecdsa_crypto::AuthorityId as BeefyId;
 use sp_core::storage::Storage;
-use sp_runtime::Perbill;
+use sp_runtime::{BoundedVec, Perbill};
 
 // Polkadot
 use polkadot_primitives::{AssignmentId, ValidatorId};
@@ -87,7 +87,13 @@ pub fn genesis() -> Storage {
 				.iter()
 				.map(|x| (x.0.clone(), x.1.clone(), STASH, pallet_staking::StakerStatus::Validator))
 				.collect(),
-			invulnerables: validators::initial_authorities().iter().map(|x| x.0.clone()).collect(),
+			invulnerables: BoundedVec::try_from(
+				validators::initial_authorities()
+					.iter()
+					.map(|x| x.0.clone())
+					.collect::<Vec<_>>(),
+			)
+			.expect("Limit for staking invulnerables must be less than initial authorities."),
 			force_era: pallet_staking::Forcing::ForceNone,
 			slash_reward_fraction: Perbill::from_percent(10),
 			..Default::default()
diff --git a/polkadot/runtime/common/src/try_runtime.rs b/polkadot/runtime/common/src/try_runtime.rs
index b22e1703292..795249dde20 100644
--- a/polkadot/runtime/common/src/try_runtime.rs
+++ b/polkadot/runtime/common/src/try_runtime.rs
@@ -36,7 +36,7 @@ where
 
 	let all_stakers = Ledger::<T>::iter().map(|(ctrl, l)| (ctrl, l.stash)).collect::<BTreeSet<_>>();
 	let mut all_exposed = BTreeSet::new();
-	ErasStakers::<T>::iter().for_each(|(_, val, expo)| {
+	ErasStakersPaged::<T>::iter().for_each(|((_era, val, _page), expo)| {
 		all_exposed.insert(val);
 		all_exposed.extend(expo.others.iter().map(|ie| ie.who.clone()))
 	});
diff --git a/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs b/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs
index bfd46d75243..68d9ee44527 100644
--- a/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs
+++ b/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs
@@ -82,8 +82,12 @@ where
 
 	pallet_session::Pallet::<T>::on_initialize(BlockNumberFor::<T>::one());
 	initializer::Pallet::<T>::on_initialize(BlockNumberFor::<T>::one());
+
 	// skip sessions until the new validator set is enacted
 	while pallet_session::Pallet::<T>::validators().len() < n as usize {
+		// initialize stakers in pallet_staking. This is suboptimal, but an easy way to avoid this
+		// being an infinite loop.
+		pallet_staking::Pallet::<T>::populate_staking_election_testing_benchmarking_only().unwrap();
 		pallet_session::Pallet::<T>::rotate_session();
 	}
 	initializer::Pallet::<T>::on_finalize(BlockNumberFor::<T>::one());
diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs
index f592dc2b61d..c0985873532 100644
--- a/polkadot/runtime/test-runtime/src/lib.rs
+++ b/polkadot/runtime/test-runtime/src/lib.rs
@@ -79,7 +79,7 @@ use polkadot_runtime_common::{
 use polkadot_runtime_parachains::reward_points::RewardValidatorsWithEraPoints;
 use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId;
 use sp_consensus_beefy::ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature};
-use sp_core::{ConstU32, OpaqueMetadata};
+use sp_core::{ConstBool, ConstU32, OpaqueMetadata};
 use sp_mmr_primitives as mmr;
 use sp_runtime::{
 	curve::PiecewiseLinear,
@@ -348,7 +348,7 @@ parameter_types! {
 	pub const MaxExposurePageSize: u32 = 64;
 	pub const MaxNominators: u32 = 256;
 	pub const MaxAuthorities: u32 = 100_000;
-	pub const OnChainMaxWinners: u32 = u32::MAX;
+	pub const OnChainMaxWinners: u32 = MaxAuthorities::get();
 	// Unbounded number of election targets and voters.
 	pub ElectionBoundsOnChain: ElectionBounds = ElectionBoundsBuilder::default().build();
 }
@@ -361,7 +361,9 @@ impl onchain::Config for OnChainSeqPhragmen {
 	type DataProvider = Staking;
 	type WeightInfo = ();
 	type Bounds = ElectionBoundsOnChain;
-	type MaxWinners = OnChainMaxWinners;
+	type MaxWinnersPerPage = OnChainMaxWinners;
+	type MaxBackersPerWinner = ConstU32<{ u32::MAX }>;
+	type Sort = ConstBool<true>;
 }
 
 /// Upper limit on the number of NPOS nominations.
@@ -400,6 +402,9 @@ impl pallet_staking::Config for Runtime {
 	type EventListeners = ();
 	type WeightInfo = ();
 	type DisablingStrategy = pallet_staking::UpToLimitWithReEnablingDisablingStrategy;
+	type MaxValidatorSet = MaxAuthorities;
+	type MaxInvulnerables = ConstU32<20>;
+	type MaxDisabledValidators = ConstU32<100>;
 }
 
 parameter_types! {
diff --git a/polkadot/runtime/westend/src/genesis_config_presets.rs b/polkadot/runtime/westend/src/genesis_config_presets.rs
index 76c0ce015c0..af5e3607df4 100644
--- a/polkadot/runtime/westend/src/genesis_config_presets.rs
+++ b/polkadot/runtime/westend/src/genesis_config_presets.rs
@@ -33,7 +33,7 @@ use sp_consensus_grandpa::AuthorityId as GrandpaId;
 use sp_core::{crypto::get_public_from_string_or_panic, sr25519};
 use sp_genesis_builder::PresetId;
 use sp_keyring::Sr25519Keyring;
-use sp_runtime::Perbill;
+use sp_runtime::{BoundedVec, Perbill};
 use westend_runtime_constants::currency::UNITS as WND;
 
 /// Helper function to generate stash, controller and session key from seed
@@ -202,7 +202,10 @@ fn westend_testnet_genesis(
 				.iter()
 				.map(|x| (x.0.clone(), x.0.clone(), STASH, StakerStatus::<AccountId>::Validator))
 				.collect::<Vec<_>>(),
-			invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect::<Vec<_>>(),
+			invulnerables: BoundedVec::try_from(
+					initial_authorities.iter().map(|x| x.0.clone()).collect::<Vec<_>>()
+				)
+				.expect("Too many invulnerable validators: upper limit is MaxInvulnerables from pallet staking config"),
 			force_era: Forcing::NotForcing,
 			slash_reward_fraction: Perbill::from_percent(10),
 		},
@@ -373,7 +376,10 @@ fn westend_staging_testnet_config_genesis() -> serde_json::Value {
 				.iter()
 				.map(|x| (x.0.clone(), x.0.clone(), STASH, StakerStatus::<AccountId>::Validator))
 				.collect::<Vec<_>>(),
-			invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect::<Vec<_>>(),
+			invulnerables: BoundedVec::try_from(
+					initial_authorities.iter().map(|x| x.0.clone()).collect::<Vec<_>>()
+				)
+				.expect("Too many invulnerable validators: upper limit is MaxInvulnerables from pallet staking config"),
 			force_era: Forcing::ForceNone,
 			slash_reward_fraction: Perbill::from_percent(10),
 		},
diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index cade86e5866..5d7a8f51625 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -96,7 +96,7 @@ use sp_consensus_beefy::{
 	ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature},
 	mmr::{BeefyDataProvider, MmrLeafVersion},
 };
-use sp_core::{ConstU8, OpaqueMetadata, RuntimeDebug, H256};
+use sp_core::{ConstBool, ConstU8, OpaqueMetadata, RuntimeDebug, H256};
 use sp_runtime::{
 	generic, impl_opaque_keys,
 	traits::{
@@ -585,7 +585,10 @@ parameter_types! {
 		ElectionBoundsBuilder::default().voters_count(MaxElectingVoters::get().into()).build();
 	// Maximum winners that can be chosen as active validators
 	pub const MaxActiveValidators: u32 = 1000;
-
+	// One page only, fill the whole page with the `MaxActiveValidators`.
+	pub const MaxWinnersPerPage: u32 = MaxActiveValidators::get();
+	// Unbonded, thus the max backers per winner maps to the max electing voters limit.
+	pub const MaxBackersPerWinner: u32 = MaxElectingVoters::get();
 }
 
 frame_election_provider_support::generate_solution_type!(
@@ -600,12 +603,14 @@ frame_election_provider_support::generate_solution_type!(
 
 pub struct OnChainSeqPhragmen;
 impl onchain::Config for OnChainSeqPhragmen {
+	type Sort = ConstBool<true>;
 	type System = Runtime;
 	type Solver = SequentialPhragmen<AccountId, OnChainAccuracy>;
 	type DataProvider = Staking;
 	type WeightInfo = weights::frame_election_provider_support::WeightInfo<Runtime>;
-	type MaxWinners = MaxActiveValidators;
 	type Bounds = ElectionBounds;
+	type MaxBackersPerWinner = MaxBackersPerWinner;
+	type MaxWinnersPerPage = MaxWinnersPerPage;
 }
 
 impl pallet_election_provider_multi_phase::MinerConfig for Runtime {
@@ -618,7 +623,8 @@ impl pallet_election_provider_multi_phase::MinerConfig for Runtime {
     as
     frame_election_provider_support::ElectionDataProvider
     >::MaxVotesPerVoter;
-	type MaxWinners = MaxActiveValidators;
+	type MaxBackersPerWinner = MaxBackersPerWinner;
+	type MaxWinners = MaxWinnersPerPage;
 
 	// The unsigned submissions have to respect the weight of the submit_unsigned call, thus their
 	// weight estimate function is wired to this call's weight.
@@ -652,6 +658,8 @@ impl pallet_election_provider_multi_phase::Config for Runtime {
 	type BetterSignedThreshold = ();
 	type OffchainRepeat = OffchainRepeat;
 	type MinerTxPriority = NposSolutionPriority;
+	type MaxWinners = MaxWinnersPerPage;
+	type MaxBackersPerWinner = MaxBackersPerWinner;
 	type DataProvider = Staking;
 	#[cfg(any(feature = "fast-runtime", feature = "runtime-benchmarks"))]
 	type Fallback = onchain::OnChainExecution<OnChainSeqPhragmen>;
@@ -660,7 +668,8 @@ impl pallet_election_provider_multi_phase::Config for Runtime {
 		AccountId,
 		BlockNumber,
 		Staking,
-		MaxActiveValidators,
+		MaxWinnersPerPage,
+		MaxBackersPerWinner,
 	)>;
 	type GovernanceFallback = onchain::OnChainExecution<OnChainSeqPhragmen>;
 	type Solver = SequentialPhragmen<
@@ -671,7 +680,6 @@ impl pallet_election_provider_multi_phase::Config for Runtime {
 	type BenchmarkingConfig = polkadot_runtime_common::elections::BenchmarkConfig;
 	type ForceOrigin = EnsureRoot<AccountId>;
 	type WeightInfo = weights::pallet_election_provider_multi_phase::WeightInfo<Self>;
-	type MaxWinners = MaxActiveValidators;
 	type ElectionBounds = ElectionBounds;
 }
 
@@ -753,6 +761,7 @@ impl pallet_staking::Config for Runtime {
 	type GenesisElectionProvider = onchain::OnChainExecution<OnChainSeqPhragmen>;
 	type VoterList = VoterList;
 	type TargetList = UseValidatorsMap<Self>;
+	type MaxValidatorSet = MaxActiveValidators;
 	type NominationsQuota = pallet_staking::FixedNominationsQuota<{ MaxNominations::get() }>;
 	type MaxUnlockingChunks = frame_support::traits::ConstU32<32>;
 	type HistoryDepth = frame_support::traits::ConstU32<84>;
@@ -761,6 +770,8 @@ impl pallet_staking::Config for Runtime {
 	type EventListeners = (NominationPools, DelegatedStaking);
 	type WeightInfo = weights::pallet_staking::WeightInfo<Runtime>;
 	type DisablingStrategy = pallet_staking::UpToLimitWithReEnablingDisablingStrategy;
+	type MaxInvulnerables = frame_support::traits::ConstU32<20>;
+	type MaxDisabledValidators = ConstU32<100>;
 }
 
 impl pallet_fast_unstake::Config for Runtime {
diff --git a/polkadot/runtime/westend/src/weights/pallet_fast_unstake.rs b/polkadot/runtime/westend/src/weights/pallet_fast_unstake.rs
index 8c061688fc6..dafac66f9d7 100644
--- a/polkadot/runtime/westend/src/weights/pallet_fast_unstake.rs
+++ b/polkadot/runtime/westend/src/weights/pallet_fast_unstake.rs
@@ -108,8 +108,6 @@ impl<T: frame_system::Config> pallet_fast_unstake::WeightInfo for WeightInfo<T>
 	/// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured)
 	/// Storage: Staking CurrentEra (r:1 w:0)
 	/// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	/// Storage: Staking ErasStakers (r:257 w:0)
-	/// Proof Skipped: Staking ErasStakers (max_values: None, max_size: None, mode: Measured)
 	/// The range of component `v` is `[1, 256]`.
 	/// The range of component `b` is `[1, 64]`.
 	fn on_idle_check(v: u32, b: u32, ) -> Weight {
diff --git a/polkadot/runtime/westend/src/weights/pallet_staking.rs b/polkadot/runtime/westend/src/weights/pallet_staking.rs
index f1e7f5ba157..f0491a1daf6 100644
--- a/polkadot/runtime/westend/src/weights/pallet_staking.rs
+++ b/polkadot/runtime/westend/src/weights/pallet_staking.rs
@@ -48,6 +48,16 @@ use core::marker::PhantomData;
 /// Weight functions for `pallet_staking`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
+	// TODO CI-FAIL: run CI bench bot
+	fn on_initialize_noop() -> Weight {
+	    Default::default()
+	}
+	fn clear_election_metadata() -> Weight {
+	    Default::default()
+	}
+	fn do_elect_paged_inner(_v: u32,) -> Weight {
+		Default::default()
+	}
 	/// Storage: `Staking::Bonded` (r:1 w:1)
 	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Ledger` (r:1 w:1)
@@ -490,8 +500,6 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Ledger` (r:65 w:65)
 	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::ErasStakersClipped` (r:1 w:0)
-	/// Proof: `Staking::ErasStakersClipped` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `Staking::ErasStakersOverview` (r:1 w:0)
 	/// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::ClaimedRewards` (r:1 w:1)
@@ -600,61 +608,6 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	/// Storage: `VoterList::ListBags` (r:178 w:0)
 	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`)
-	/// Storage: `VoterList::ListNodes` (r:110 w:0)
-	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::Bonded` (r:110 w:0)
-	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::Ledger` (r:110 w:0)
-	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::Nominators` (r:110 w:0)
-	/// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::Validators` (r:11 w:0)
-	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::CounterForValidators` (r:1 w:0)
-	/// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::ValidatorCount` (r:1 w:0)
-	/// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::MinimumValidatorCount` (r:1 w:0)
-	/// Proof: `Staking::MinimumValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::CurrentEra` (r:1 w:1)
-	/// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::ErasValidatorPrefs` (r:0 w:10)
-	/// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::ErasStakersPaged` (r:0 w:20)
-	/// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	/// Storage: `Staking::ErasStakersOverview` (r:0 w:10)
-	/// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::ErasTotalStake` (r:0 w:1)
-	/// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::ErasStartSessionIndex` (r:0 w:1)
-	/// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::MinimumActiveStake` (r:0 w:1)
-	/// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
-	/// The range of component `v` is `[1, 10]`.
-	/// The range of component `n` is `[0, 100]`.
-	fn new_era(v: u32, n: u32, ) -> Weight {
-		// Proof Size summary in bytes:
-		//  Measured:  `0 + n * (716 ±0) + v * (3594 ±0)`
-		//  Estimated: `456136 + n * (3566 ±4) + v * (3566 ±40)`
-		// Minimum execution time: 654_756_000 picoseconds.
-		Weight::from_parts(658_861_000, 0)
-			.saturating_add(Weight::from_parts(0, 456136))
-			// Standard Error: 2_078_102
-			.saturating_add(Weight::from_parts(67_775_668, 0).saturating_mul(v.into()))
-			// Standard Error: 207_071
-			.saturating_add(Weight::from_parts(22_624_711, 0).saturating_mul(n.into()))
-			.saturating_add(T::DbWeight::get().reads(184))
-			.saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into())))
-			.saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into())))
-			.saturating_add(T::DbWeight::get().writes(8))
-			.saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(v.into())))
-			.saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into()))
-			.saturating_add(Weight::from_parts(0, 3566).saturating_mul(v.into()))
-	}
-	/// Storage: `VoterList::CounterForListNodes` (r:1 w:0)
-	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
-	/// Storage: `VoterList::ListBags` (r:178 w:0)
-	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`)
 	/// Storage: `VoterList::ListNodes` (r:2000 w:0)
 	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Bonded` (r:2000 w:0)
diff --git a/prdoc/pr_6034.prdoc b/prdoc/pr_6034.prdoc
new file mode 100644
index 00000000000..e6ecd8aae5c
--- /dev/null
+++ b/prdoc/pr_6034.prdoc
@@ -0,0 +1,25 @@
+title: Adds multi-block election types and refactors current single logic to support it
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR adds election types and structs required to run a multi-block election. In addition,
+      it modifies EPM, staking pallet and all dependent pallets and logic to use the multi-block types.
+
+crates:
+ - name: frame-election-provider-support
+   bump: major
+ - name: pallet-election-provider-multi-phase
+   bump: major
+ - name: pallet-staking
+   bump: major
+ - name: pallet-fast-unstake
+   bump: minor
+ - name: pallet-delegated-staking
+   bump: minor
+ - name: sp-npos-elections
+   bump: major
+ - name: sp-staking
+   bump: major
+ - name: pallet-bags-list-remote-tests
+   bump: minor
diff --git a/prdoc/pr_6689.prdoc b/prdoc/pr_6689.prdoc
index 2cbb49cd7dd..72e935e2e98 100644
--- a/prdoc/pr_6689.prdoc
+++ b/prdoc/pr_6689.prdoc
@@ -1,13 +1,12 @@
 title: '[pallet-revive] Update gas encoding'
 doc:
 - audience: Runtime Dev
-  description: |-
+  description: |
     Update the current approach to attach the `ref_time`, `pov` and `deposit` parameters to an Ethereum transaction.
-Previously, these three parameters were passed along with the signed payload, and the fees resulting from gas × gas_price were checked to ensure they matched the actual fees paid by the user for the extrinsic
-
+    Previously, these three parameters were passed along with the signed payload, and the fees resulting from gas × gas_price were checked to ensure they matched the actual fees paid by the user for the extrinsic
     This approach unfortunately can be attacked. A malicious actor could force such a transaction to fail by injecting low values for some of these extra parameters as they are not part of the signed payload.
-
+    The new approach encodes these 3 extra parameters in the lower digits of the transaction gas, using the log2 of the actual values to encode each component on 2 digits
+
 crates:
 - name: pallet-revive-eth-rpc
   bump: minor
diff --git a/prdoc/pr_7042.prdoc b/prdoc/pr_7042.prdoc
index 00fb34c6af4..1c585f9dff0 100644
--- a/prdoc/pr_7042.prdoc
+++ b/prdoc/pr_7042.prdoc
@@ -1,4 +1,4 @@
-title: `networking::TransactionPool` should accept `Arc`
+title: networking::TransactionPool should accept Arc
 doc:
 - audience: Node Dev
   description: The `sc_network_transactions::config::TransactionPool` trait now returns an `Arc` for transactions.
@@ -6,4 +6,4 @@ crates:
 - name: sc-network-transactions
   bump: minor
 - name: sc-service
-  bump: minor
\ No newline at end of file
+  bump: minor
diff --git a/prdoc/pr_7282.prdoc b/prdoc/pr_7282.prdoc
new file mode 100644
index 00000000000..3d12a8b184a
--- /dev/null
+++ b/prdoc/pr_7282.prdoc
@@ -0,0 +1,72 @@
+title: AHM Multi-block staking election pallet
+doc:
+- audience: Runtime Dev
+  description: |
+    ## Multi Block Election Pallet
+
+    This PR adds the first iteration of the multi-block staking pallet.
+
+    From this point onwards, the staking and its election provider pallets are being customized to work in AssetHub. While usage in solo-chains is still possible, it is no longer the main focus of this pallet. For safer usage, please fork and use an older version of this pallet.
+crates:
+- name: pallet-election-provider-multi-block
+  bump: major
+- name: frame-election-provider-support
+  bump: major
+- name: frame-election-provider-solution-type
+  bump: major
+- name: sp-npos-elections
+  bump: major
+- name: sp-staking
+  bump: major
+- name: pallet-staking
+  bump: major
+- name: pallet-election-provider-multi-phase
+  bump: major
+- name: westend-runtime
+  bump: major
+- name: pallet-delegated-staking
+  bump: major
+- name: pallet-fast-unstake
+  bump: major
+- name: pallet-session-benchmarking
+  bump: major
+- name: sc-consensus-grandpa
+  bump: major
+- name: pallet-babe
+  bump: major
+- name: pallet-beefy
+  bump: major
+- name: pallet-grandpa
+  bump: major
+- name: pallet-nomination-pools
+  bump: major
+- name: pallet-root-offences
+  bump: major
+- name: pallet-nomination-pools-benchmarking
+  bump: major
+- name: pallet-offences-benchmarking
+  bump: major
+- name: cumulus-pov-validator
+  bump: major
+- name: polkadot-sdk
+  bump: major
+- name: asset-hub-rococo-runtime
+  bump: major
+- name: pallet-bags-list
+  bump: major
+- name: frame-benchmarking
+  bump: major
+- name: frame-support-procedural
+  bump: major
+- name: frame-support
+  bump: major
+- name: frame-benchmarking-cli
+  bump: major
+- name: polkadot-runtime-common
+  bump: major
+- name: pallet-elections-phragmen
+  bump: major
+- name: pallet-election-provider-support-benchmarking
+  bump: major
+- name: pallet-session
+  bump: major
diff --git a/substrate/.maintain/frame-umbrella-weight-template.hbs b/substrate/.maintain/frame-umbrella-weight-template.hbs
index c99758c41d9..6985944b0a3 100644
--- a/substrate/.maintain/frame-umbrella-weight-template.hbs
+++ b/substrate/.maintain/frame-umbrella-weight-template.hbs
@@ -1,3 +1,20 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 {{header}}
 //! Autogenerated weights for `{{pallet}}`
 //!
diff --git a/substrate/.maintain/frame-weight-template.hbs b/substrate/.maintain/frame-weight-template.hbs
index 624fc57aa32..c2a22200dc9 100644
--- a/substrate/.maintain/frame-weight-template.hbs
+++ b/substrate/.maintain/frame-weight-template.hbs
@@ -1,3 +1,20 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 {{header}}
 //! Autogenerated weights for `{{pallet}}`
 //!
diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml
index 7b355074823..46ccff34bf7 100644
--- a/substrate/bin/node/cli/Cargo.toml
+++ b/substrate/bin/node/cli/Cargo.toml
@@ -182,6 +182,9 @@ try-runtime = [
 	"polkadot-sdk/try-runtime",
 	"substrate-cli-test-utils/try-runtime",
 ]
+staking-playground = [
+	"kitchensink-runtime/staking-playground",
+]
 
 [[bench]]
 name = "transaction_pool"
diff --git a/substrate/bin/node/cli/src/chain_spec.rs b/substrate/bin/node/cli/src/chain_spec.rs
index 038aa2f6092..af08ca7f609 100644
--- a/substrate/bin/node/cli/src/chain_spec.rs
+++ b/substrate/bin/node/cli/src/chain_spec.rs
@@ -347,6 +347,29 @@ pub fn testnet_genesis(
 ) -> serde_json::Value {
 	let (initial_authorities, endowed_accounts, num_endowed_accounts, stakers) =
 		configure_accounts(initial_authorities, initial_nominators, endowed_accounts, STASH);
+	const MAX_COLLECTIVE_SIZE: usize = 50;
+
+	let dev_stakers = if cfg!(feature = "staking-playground") {
+		let random_validators =
+			std::option_env!("VALIDATORS").map(|s| s.parse::<u32>().unwrap()).unwrap_or(100);
+		let random_nominators = std::option_env!("NOMINATORS")
+			.map(|s| s.parse::<u32>().unwrap())
+			.unwrap_or(3000);
+		Some((random_validators, random_nominators))
+	} else {
+		None
+	};
+
+	let validator_count = if cfg!(feature = "staking-playground") {
+		std::option_env!("VALIDATOR_COUNT")
+			.map(|v| v.parse::<u32>().unwrap())
+			.unwrap_or(100)
+	} else {
+		initial_authorities.len() as u32
+	};
+
+	let minimum_validator_count =
+		if cfg!(feature = "staking-playground") { 10 } else { initial_authorities.len() as u32 };
 
 	serde_json::json!({
 		"balances": {
@@ -372,16 +395,17 @@ pub fn testnet_genesis(
 				.collect::<Vec<_>>(),
 		},
 		"staking": {
-			"validatorCount": initial_authorities.len() as u32,
-			"minimumValidatorCount": initial_authorities.len() as u32,
+			"validatorCount": validator_count,
+			"minimumValidatorCount": minimum_validator_count,
 			"invulnerables": initial_authorities.iter().map(|x| x.0.clone()).collect::<Vec<_>>(),
 			"slashRewardFraction": Perbill::from_percent(10),
 			"stakers": stakers.clone(),
+			"devStakers": dev_stakers
 		},
 		"elections": {
 			"members": endowed_accounts
 				.iter()
-				.take((num_endowed_accounts + 1) / 2)
+				.take(((num_endowed_accounts + 1) / 2).min(MAX_COLLECTIVE_SIZE))
 				.cloned()
 				.map(|member| (member, STASH))
 				.collect::<Vec<_>>(),
@@ -389,7 +413,7 @@ pub fn testnet_genesis(
 		"technicalCommittee": {
 			"members": endowed_accounts
 				.iter()
-				.take((num_endowed_accounts + 1) / 2)
+				.take(((num_endowed_accounts + 1) / 2).min(MAX_COLLECTIVE_SIZE))
 				.cloned()
 				.collect::<Vec<_>>(),
 		},
diff --git a/substrate/bin/node/cli/tests/res/default_genesis_config.json b/substrate/bin/node/cli/tests/res/default_genesis_config.json
index 8ad2428f785..4d846252021 100644
--- a/substrate/bin/node/cli/tests/res/default_genesis_config.json
+++ b/substrate/bin/node/cli/tests/res/default_genesis_config.json
@@ -22,6 +22,7 @@
     "multiplier": "1000000000000000000"
   },
   "staking": {
+    "devStakers": null,
     "validatorCount": 0,
     "minimumValidatorCount": 0,
     "invulnerables": [],
diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml
index 6d377cc92cc..07c97f8c271 100644
--- a/substrate/bin/node/runtime/Cargo.toml
+++ b/substrate/bin/node/runtime/Cargo.toml
@@ -74,3 +74,9 @@ experimental = [
 	"pallet-example-tasks/experimental",
 ]
 metadata-hash = ["substrate-wasm-builder/metadata-hash"]
+# Test temp feature to allow this chain to be used for swift testing of staking elections. should
+# only be run by --dev chain. It will create a large staking election process as per the constants
+# in `chain_spec.rs`, but `Alice` will be the only authority that is communicated to the node and
+# ergo block production works fine with --dev and is independent of staking election. See `pub
+# struct AliceAsOnlyValidator`.
+staking-playground = []
diff --git a/substrate/bin/node/runtime/src/constants.rs b/substrate/bin/node/runtime/src/constants.rs
index d13dca48d1f..576ed540180 100644
--- a/substrate/bin/node/runtime/src/constants.rs
+++ b/substrate/bin/node/runtime/src/constants.rs
@@ -63,7 +63,8 @@ pub mod time {
 
 	// NOTE: Currently it is not possible to change the epoch duration after the chain has started.
 	//       Attempting to do so will brick block production.
-	pub const EPOCH_DURATION_IN_BLOCKS: BlockNumber = 10 * MINUTES;
+	pub const EPOCH_DURATION_IN_BLOCKS: BlockNumber = 20 * MINUTES;
+
 	pub const EPOCH_DURATION_IN_SLOTS: u64 = {
 		const SLOT_FILL_RATE: f64 = MILLISECS_PER_BLOCK as f64 / SLOT_DURATION as f64;
 
diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs
index 3cbc8aa7115..d3dd08369e7 100644
--- a/substrate/bin/node/runtime/src/lib.rs
+++ b/substrate/bin/node/runtime/src/lib.rs
@@ -673,16 +673,119 @@ impl_opaque_keys! {
 	}
 }
 
+#[cfg(feature = "staking-playground")]
+pub mod staking_playground {
+	use pallet_staking::Exposure;
+
+	use super::*;
+
+	/// An adapter to make the chain work with --dev only, even though it is running a large staking
+	/// election.
+	///
+	/// It will ignore the staking election and just set the validator set to alice.
+	///
+	/// Needs to be fed into `type SessionManager`.
+	pub struct AliceAsOnlyValidator;
+	impl pallet_session::SessionManager<AccountId> for AliceAsOnlyValidator {
+		fn end_session(end_index: sp_staking::SessionIndex) {
+			<Staking as pallet_session::SessionManager<AccountId>>::end_session(end_index)
+		}
+
+		fn new_session(new_index: sp_staking::SessionIndex) -> Option<Vec<AccountId>> {
+			<Staking as pallet_session::SessionManager<AccountId>>::new_session(new_index).map(
+				|_ignored_validators| {
+					vec![sp_keyring::Sr25519Keyring::AliceStash.to_account_id().into()]
+				},
+			)
+		}
+
+		fn new_session_genesis(new_index: sp_staking::SessionIndex) -> Option<Vec<AccountId>> {
+			<Staking as pallet_session::SessionManager<AccountId>>::new_session_genesis(new_index)
+				.map(|_ignored_validators| {
+					vec![sp_keyring::Sr25519Keyring::AliceStash.to_account_id().into()]
+				})
+		}
+
+		fn start_session(start_index: sp_staking::SessionIndex) {
+			<Staking as pallet_session::SessionManager<AccountId>>::start_session(start_index)
+		}
+	}
+
+	impl pallet_session::historical::SessionManager<AccountId, Exposure<AccountId, Balance>>
+		for AliceAsOnlyValidator
+	{
+		fn end_session(end_index: sp_staking::SessionIndex) {
+			<Staking as pallet_session::historical::SessionManager<
+				AccountId,
+				Exposure<AccountId, Balance>,
+			>>::end_session(end_index)
+		}
+
+		fn new_session(
+			new_index: sp_staking::SessionIndex,
+		) -> Option<Vec<(AccountId, Exposure<AccountId, Balance>)>> {
+			<Staking as pallet_session::historical::SessionManager<
+				AccountId,
+				Exposure<AccountId, Balance>,
+			>>::new_session(new_index)
+			.map(|_ignored| {
+				// construct a fake exposure for alice.
+				vec![(
+					sp_keyring::Sr25519Keyring::AliceStash.to_account_id().into(),
+					pallet_staking::Exposure {
+						total: 1_000_000_000,
+						own: 1_000_000_000,
+						others: vec![],
+					},
+				)]
+			})
+		}
+
+		fn new_session_genesis(
+			new_index: sp_staking::SessionIndex,
+		) -> Option<Vec<(AccountId, Exposure<AccountId, Balance>)>> {
+			<Staking as pallet_session::historical::SessionManager<
+				AccountId,
+				Exposure<AccountId, Balance>,
+			>>::new_session_genesis(new_index)
+			.map(|_ignored| {
+				// construct a fake exposure for alice.
+				vec![(
+					sp_keyring::Sr25519Keyring::AliceStash.to_account_id().into(),
+					pallet_staking::Exposure {
+						total: 1_000_000_000,
+						own: 1_000_000_000,
+						others: vec![],
+					},
+				)]
+			})
+		}
+
+		fn start_session(start_index: sp_staking::SessionIndex) {
+			<Staking as pallet_session::historical::SessionManager<
+				AccountId,
+				Exposure<AccountId, Balance>,
+			>>::start_session(start_index)
+		}
+	}
+}
+
 impl pallet_session::Config for Runtime {
 	type RuntimeEvent = RuntimeEvent;
 	type ValidatorId = <Self as frame_system::Config>::AccountId;
 	type ValidatorIdOf = pallet_staking::StashOf<Self>;
 	type ShouldEndSession = Babe;
 	type NextSessionRotation = Babe;
-	type SessionManager = pallet_session::historical::NoteHistoricalRoot<Self, Staking>;
 	type SessionHandler = <SessionKeys as OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
 	type WeightInfo = pallet_session::weights::SubstrateWeight<Runtime>;
+	#[cfg(not(feature = "staking-playground"))]
+	type SessionManager = pallet_session::historical::NoteHistoricalRoot<Self, Staking>;
+	#[cfg(feature = "staking-playground")]
+	type SessionManager = pallet_session::historical::NoteHistoricalRoot<
+		Self,
+		staking_playground::AliceAsOnlyValidator,
+	>;
 }
 
 impl pallet_session::historical::Config for Runtime {
@@ -701,8 +804,16 @@ pallet_staking_reward_curve::build! {
 	);
 }
 
+#[cfg(not(feature = "staking-playground"))]
 parameter_types! {
 	pub const SessionsPerEra: sp_staking::SessionIndex = 6;
+}
+#[cfg(feature = "staking-playground")]
+parameter_types! {
+	pub const SessionsPerEra: sp_staking::SessionIndex = 2;
+}
+
+parameter_types! {
 	pub const BondingDuration: sp_staking::EraIndex = 24 * 28;
 	pub const SlashDeferDuration: sp_staking::EraIndex = 24 * 7; // 1/4 the bonding duration.
 	pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE;
@@ -717,10 +828,35 @@ const MAX_QUOTA_NOMINATIONS: u32 = 16;
 
 pub struct StakingBenchmarkingConfig;
 impl pallet_staking::BenchmarkingConfig for StakingBenchmarkingConfig {
-	type MaxNominators = ConstU32<1000>;
+	type MaxNominators = ConstU32<5000>;
 	type MaxValidators = ConstU32<1000>;
 }
 
+use frame_election_provider_support::{BoundedSupportsOf, ElectionProvider, PageIndex};
+pub struct MultiElectionProvider;
+impl ElectionProvider for MultiElectionProvider {
+	type AccountId = <MultiBlock as ElectionProvider>::AccountId;
+	type BlockNumber = <MultiBlock as ElectionProvider>::BlockNumber;
+	type DataProvider = <MultiBlock as ElectionProvider>::DataProvider;
+	type Error = <MultiBlock as ElectionProvider>::Error;
+	type Pages = <MultiBlock as ElectionProvider>::Pages;
+	type MaxBackersPerWinner = <MultiBlock as ElectionProvider>::MaxBackersPerWinner;
+	type MaxWinnersPerPage = <MultiBlock as ElectionProvider>::MaxWinnersPerPage;
+
+	fn elect(page: PageIndex) -> Result<BoundedSupportsOf<Self>, Self::Error> {
+		if page == 0 && !cfg!(feature = "runtime-benchmarks") {
+			// TODO: later on, we can even compare the results of the multi-page and multi-block
+			// election in here.
+			let _ = ElectionProviderMultiPhase::elect(page);
+		}
+		MultiBlock::elect(page)
+	}
+
+	fn ongoing() -> bool {
+		MultiBlock::ongoing()
+	}
+}
+
 impl pallet_staking::Config for Runtime {
 	type OldCurrency = Balances;
 	type Currency = Balances;
@@ -743,8 +879,9 @@ impl pallet_staking::Config for Runtime {
 	type SessionInterface = Self;
 	type EraPayout = pallet_staking::ConvertCurve<RewardCurve>;
 	type NextNewSession = Session;
-	type MaxExposurePageSize = ConstU32<256>;
-	type ElectionProvider = ElectionProviderMultiPhase;
+	type MaxExposurePageSize = multi_block_impls::MaxExposurePageSize;
+	type MaxValidatorSet = multi_block_impls::MaxWinnersPerPage;
+	type ElectionProvider = MultiElectionProvider;
 	type GenesisElectionProvider = onchain::OnChainExecution<OnChainSeqPhragmen>;
 	type VoterList = VoterList;
 	type NominationsQuota = pallet_staking::FixedNominationsQuota<MAX_QUOTA_NOMINATIONS>;
@@ -757,6 +894,8 @@ impl pallet_staking::Config for Runtime {
 	type WeightInfo = pallet_staking::weights::SubstrateWeight<Runtime>;
 	type BenchmarkingConfig = StakingBenchmarkingConfig;
 	type DisablingStrategy = pallet_staking::UpToLimitWithReEnablingDisablingStrategy;
+	type MaxInvulnerables = ConstU32<20>;
+	type MaxDisabledValidators = ConstU32<100>;
 }
 
 impl pallet_fast_unstake::Config for Runtime {
@@ -770,10 +909,132 @@ impl pallet_fast_unstake::Config for Runtime {
 	type WeightInfo = ();
 }
 
+frame_election_provider_support::generate_solution_type!(
+	#[compact]
+	pub struct NposSolution16::<
+		VoterIndex = u32,
+		TargetIndex = u16,
+		Accuracy = sp_runtime::PerU16,
+		MaxVoters = ConstU32<22500>,
+	>(16)
+);
+
+pub(crate) mod multi_block_impls {
+	use super::*;
+	use pallet_election_provider_multi_block as multi_block;
+	use pallet_election_provider_multi_phase as multi_phase;
+
+	frame_election_provider_support::generate_solution_type!(
+		#[compact]
+		pub struct MultiBlockSolution::<
+			VoterIndex = u16,
+			TargetIndex = u16,
+			Accuracy = sp_runtime::Percent,
+			MaxVoters = ConstU32<{22500 / Pages::get()}>,
+		>(16)
+	);
+
+	parameter_types! {
+		pub const Pages: u32 = 32;
+		// nominators snapshot size
+		pub VoterSnapshotPerBlock: u32 = 22500 / Pages::get();
+		// validator snapshot size
+		pub TargetSnapshotPerBlock: u32 = 1000;
+		pub SignedPhase: u32 = 3 * EPOCH_DURATION_IN_BLOCKS / 4;
+		// 2 signed solutions to be validated
+		pub SignedValidation: u32 = Pages::get() * 2;
+		pub UnsignedPhase: u32 = EPOCH_DURATION_IN_BLOCKS / 4;
+		pub MaxWinnersPerPage: u32 = 1000;
+		pub MaxBackersPerWinner: u32 = 128;
+		pub MaxExposurePageSize: u32 = 32;
+	}
+
+	impl multi_block::unsigned::miner::MinerConfig for Runtime {
+		type AccountId = AccountId;
+		type Hash = Hash;
+		type MaxBackersPerWinner = <Self as multi_block::verifier::Config>::MaxBackersPerWinner;
+		type MaxBackersPerWinnerFinal =
+			<Self as multi_block::verifier::Config>::MaxBackersPerWinnerFinal;
+		type MaxWinnersPerPage = <Self as multi_block::verifier::Config>::MaxWinnersPerPage;
+		type MaxVotesPerVoter =
+			<<Self as multi_block::Config>::DataProvider as ElectionDataProvider>::MaxVotesPerVoter;
+		type MaxLength = MinerMaxLength;
+		type Solver = <Runtime as multi_block::unsigned::Config>::OffchainSolver;
+		type Pages = Pages;
+		type Solution = MultiBlockSolution;
+		type VoterSnapshotPerBlock = <Runtime as multi_block::Config>::VoterSnapshotPerBlock;
+		type TargetSnapshotPerBlock = <Runtime as multi_block::Config>::TargetSnapshotPerBlock;
+	}
+
+	impl multi_block::Config for Runtime {
+		type AdminOrigin = EnsureRoot<AccountId>;
+		type RuntimeEvent = RuntimeEvent;
+		type DataProvider = Staking;
+		#[cfg(not(feature = "runtime-benchmarks"))]
+		type Fallback = multi_block::Continue<Self>;
+		#[cfg(feature = "runtime-benchmarks")]
+		type Fallback = onchain::OnChainExecution<OnChainSeqPhragmen>;
+		// prepare for election 5 blocks ahead of time
+		type Lookahead = ConstU32<5>;
+		// split election into `Pages` (32) pages.
+		type Pages = Pages;
+		// allow 2 signed solutions to be verified.
+		type SignedValidationPhase = SignedValidation;
+		// TODO: sanity check that the length of all phases is within reason.
+		type SignedPhase = SignedPhase;
+		type UnsignedPhase = UnsignedPhase;
+		type TargetSnapshotPerBlock = TargetSnapshotPerBlock;
+		type VoterSnapshotPerBlock = VoterSnapshotPerBlock;
+		type Verifier = MultiBlockVerifier;
+		type MinerConfig = Self;
+		type WeightInfo = multi_block::weights::AllZeroWeights;
+	}
+
+	impl multi_block::verifier::Config for Runtime {
+		type MaxBackersPerWinner = MaxBackersPerWinner;
+		type MaxWinnersPerPage = MaxWinnersPerPage;
+		type MaxBackersPerWinnerFinal = ConstU32<{ u32::MAX }>;
+		type RuntimeEvent = RuntimeEvent;
+		type SolutionDataProvider = MultiBlockSigned;
+		type SolutionImprovementThreshold = ();
+		type WeightInfo = multi_block::weights::AllZeroWeights;
+	}
+
+	parameter_types! {
+		pub const BailoutGraceRatio: Perbill = Perbill::from_percent(50);
+	}
+
+	impl multi_block::signed::Config for Runtime {
+		type BailoutGraceRatio = BailoutGraceRatio;
+		// TODO: we need an increase factor for this pallet as well.
+		type DepositBase = SignedFixedDeposit;
+		type DepositPerPage = SignedDepositByte;
+		type MaxSubmissions = ConstU32<8>;
+		type RewardBase = SignedRewardBase;
+
+		type EstimateCallFee = TransactionPayment;
+		type Currency = Balances;
+
+		type RuntimeEvent = RuntimeEvent;
+		type RuntimeHoldReason = RuntimeHoldReason;
+		type WeightInfo = multi_block::weights::AllZeroWeights;
+	}
+
+	impl multi_block::unsigned::Config for Runtime {
+		type OffchainSolver = <Runtime as multi_phase::Config>::Solver;
+		// offchain usage of miner configs
+		type MinerTxPriority = <Runtime as multi_phase::Config>::MinerTxPriority;
+		// TODO: this needs to be an educated number: "estimate mining time per page * pages"
+		type OffchainRepeat = ConstU32<5>;
+
+		type WeightInfo = multi_block::weights::AllZeroWeights;
+	}
+}
+
 parameter_types! {
-	// phase durations. 1/4 of the last session for each.
-	pub const SignedPhase: u32 = EPOCH_DURATION_IN_BLOCKS / 4;
-	pub const UnsignedPhase: u32 = EPOCH_DURATION_IN_BLOCKS / 4;
+	// phase durations. 1/2 of the last session for each.
+	pub const SignedPhase: u32 = EPOCH_DURATION_IN_BLOCKS / 2;
+	pub const UnsignedPhase: u32 = EPOCH_DURATION_IN_BLOCKS / 2;
 
 	// signed config
 	pub const SignedRewardBase: Balance = 1 * DOLLARS;
@@ -794,29 +1055,15 @@ parameter_types! {
 		.get(DispatchClass::Normal);
 }
 
-frame_election_provider_support::generate_solution_type!(
-	#[compact]
-	pub struct NposSolution16::<
-		VoterIndex = u32,
-		TargetIndex = u16,
-		Accuracy = sp_runtime::PerU16,
-		MaxVoters = MaxElectingVotersSolution,
-	>(16)
-);
-
 parameter_types! {
-	// Note: the EPM in this runtime runs the election on-chain. The election bounds must be
-	// carefully set so that an election round fits in one block.
+	/// Note: the EPM in this runtime runs the election on-chain. The election bounds must be
+	/// carefully set so that an election round fits in one block.
 	pub ElectionBoundsMultiPhase: ElectionBounds = ElectionBoundsBuilder::default()
 		.voters_count(10_000.into()).targets_count(1_500.into()).build();
 	pub ElectionBoundsOnChain: ElectionBounds = ElectionBoundsBuilder::default()
 		.voters_count(5_000.into()).targets_count(1_250.into()).build();
 
 	pub MaxNominations: u32 = <NposSolution16 as frame_election_provider_support::NposSolution>::LIMIT as u32;
-	pub MaxElectingVotersSolution: u32 = 40_000;
-	// The maximum winners that can be elected by the Election pallet which is equivalent to the
-	// maximum active validators the staking pallet can have.
-	pub MaxActiveValidators: u32 = 1000;
 }
 
 /// The numbers configured here could always be more than the the maximum limits of staking pallet
@@ -860,6 +1107,7 @@ impl Get<Option<BalancingConfig>> for OffchainRandomBalancing {
 
 pub struct OnChainSeqPhragmen;
 impl onchain::Config for OnChainSeqPhragmen {
+	type Sort = ConstBool<true>;
 	type System = Runtime;
 	type Solver = SequentialPhragmen<
 		AccountId,
@@ -867,8 +1115,10 @@ impl onchain::Config for OnChainSeqPhragmen {
 	>;
 	type DataProvider = <Runtime as pallet_election_provider_multi_phase::Config>::DataProvider;
 	type WeightInfo = frame_election_provider_support::weights::SubstrateWeight<Runtime>;
-	type MaxWinners = <Runtime as pallet_election_provider_multi_phase::Config>::MaxWinners;
 	type Bounds = ElectionBoundsOnChain;
+	type MaxBackersPerWinner =
+		<Runtime as pallet_election_provider_multi_phase::Config>::MaxBackersPerWinner;
+	type MaxWinnersPerPage = multi_block_impls::MaxWinnersPerPage;
 }
 
 impl pallet_election_provider_multi_phase::MinerConfig for Runtime {
@@ -878,7 +1128,8 @@ impl pallet_election_provider_multi_phase::MinerConfig for Runtime {
 	type Solution = NposSolution16;
 	type MaxVotesPerVoter =
 	<<Self as pallet_election_provider_multi_phase::Config>::DataProvider as ElectionDataProvider>::MaxVotesPerVoter;
-	type MaxWinners = MaxActiveValidators;
+	type MaxWinners = multi_block_impls::MaxWinnersPerPage;
+	type MaxBackersPerWinner = multi_block_impls::MaxBackersPerWinner;
 
 	// The unsigned submissions have to respect the weight of the submit_unsigned call, thus their
 	// weight estimate function is wired to this call's weight.
@@ -912,11 +1163,21 @@ impl pallet_election_provider_multi_phase::Config for Runtime {
 	type SlashHandler = (); // burn slashes
 	type RewardHandler = (); // rewards are minted from the void
 	type DataProvider = Staking;
+	#[cfg(not(feature = "runtime-benchmarks"))]
+	type Fallback = frame_election_provider_support::NoElection<(
+		AccountId,
+		BlockNumber,
+		Staking,
+		multi_block_impls::MaxWinnersPerPage,
+		multi_block_impls::MaxBackersPerWinner,
+	)>;
+	#[cfg(feature = "runtime-benchmarks")]
 	type Fallback = onchain::OnChainExecution<OnChainSeqPhragmen>;
 	type GovernanceFallback = onchain::OnChainExecution<OnChainSeqPhragmen>;
 	type Solver = SequentialPhragmen<AccountId, SolutionAccuracyOf<Self>, OffchainRandomBalancing>;
 	type ForceOrigin = EnsureRootOrHalfCouncil;
-	type MaxWinners = MaxActiveValidators;
+	type MaxWinners = multi_block_impls::MaxWinnersPerPage;
+	type MaxBackersPerWinner = multi_block_impls::MaxBackersPerWinner;
 	type ElectionBounds = ElectionBoundsMultiPhase;
 	type BenchmarkingConfig = ElectionProviderBenchmarkConfig;
 	type WeightInfo = pallet_election_provider_multi_phase::weights::SubstrateWeight<Self>;
@@ -1232,8 +1493,8 @@ parameter_types! {
 	pub const DesiredMembers: u32 = 13;
 	pub const DesiredRunnersUp: u32 = 7;
 	pub const MaxVotesPerVoter: u32 = 16;
-	pub const MaxVoters: u32 = 512;
-	pub const MaxCandidates: u32 = 64;
+	pub const MaxVoters: u32 = 256;
+	pub const MaxCandidates: u32 = 128;
 	pub const ElectionsPhragmenPalletId: LockIdentifier = *b"phrelect";
 }
 
@@ -1513,7 +1774,7 @@ parameter_types! {
 	pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::max_value();
 	/// We prioritize im-online heartbeats over election solution submission.
 	pub const StakingUnsignedPriority: TransactionPriority = TransactionPriority::max_value() / 2;
-	pub const MaxAuthorities: u32 = 100;
+	pub const MaxAuthorities: u32 = 1000;
 	pub const MaxKeys: u32 = 10_000;
 	pub const MaxPeerInHeartbeats: u32 = 10_000;
 }
@@ -2728,6 +2989,16 @@ mod runtime {
 
 	#[runtime::pallet_index(84)]
 	pub type AssetsFreezer = pallet_assets_freezer::Pallet<Runtime, Instance1>;
+
+	// Order is important!
+	#[runtime::pallet_index(85)]
+	pub type MultiBlock = pallet_election_provider_multi_block::Pallet<Runtime>;
+	#[runtime::pallet_index(86)]
+	pub type MultiBlockVerifier = pallet_election_provider_multi_block::verifier::Pallet<Runtime>;
+	#[runtime::pallet_index(87)]
+	pub type MultiBlockUnsigned = pallet_election_provider_multi_block::unsigned::Pallet<Runtime>;
+	#[runtime::pallet_index(88)]
+	pub type MultiBlockSigned = pallet_election_provider_multi_block::signed::Pallet<Runtime>;
 }
 
 impl TryFrom<RuntimeCall> for pallet_revive::Call<Runtime> {
@@ -2942,6 +3213,10 @@ mod benches {
 		[pallet_asset_conversion_tx_payment, AssetConversionTxPayment]
 		[pallet_transaction_payment, TransactionPayment]
 		[pallet_election_provider_multi_phase, ElectionProviderMultiPhase]
+		[pallet_election_provider_multi_block, MultiBlock]
+		[pallet_election_provider_multi_block::verifier, MultiBlockVerifier]
+		[pallet_election_provider_multi_block::unsigned, MultiBlockUnsigned]
+		[pallet_election_provider_multi_block::signed, MultiBlockSigned]
 		[pallet_election_provider_support_benchmarking, EPSBench::<Runtime>]
 		[pallet_elections_phragmen, Elections]
 		[pallet_fast_unstake, FastUnstake]
diff --git a/substrate/bin/node/testing/src/genesis.rs b/substrate/bin/node/testing/src/genesis.rs
index 624b00b4d6c..aaa19e15d07 100644
--- a/substrate/bin/node/testing/src/genesis.rs
+++ b/substrate/bin/node/testing/src/genesis.rs
@@ -24,7 +24,7 @@ use kitchensink_runtime::{
 	RuntimeGenesisConfig, SessionConfig, SocietyConfig, StakerStatus, StakingConfig,
 };
 use sp_keyring::Ed25519Keyring;
-use sp_runtime::Perbill;
+use sp_runtime::{BoundedVec, Perbill};
 
 /// Create genesis runtime configuration for tests.
 pub fn config() -> RuntimeGenesisConfig {
@@ -65,7 +65,8 @@ pub fn config_endowed(extra_endowed: Vec<AccountId>) -> RuntimeGenesisConfig {
 			validator_count: 3,
 			minimum_validator_count: 0,
 			slash_reward_fraction: Perbill::from_percent(10),
-			invulnerables: vec![alice(), bob(), charlie()],
+			invulnerables: BoundedVec::try_from(vec![alice(), bob(), charlie()])
+				.expect("Too many invulnerable validators: upper limit is MaxInvulnerables from pallet staking config"),
 			..Default::default()
 		},
 		society: SocietyConfig { pot: 0 },
diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs
index 6f9f54cc7ef..1e4f51d5143 100644
--- a/substrate/frame/babe/src/mock.rs
+++ b/substrate/frame/babe/src/mock.rs
@@ -31,7 +31,7 @@ use pallet_session::historical as pallet_session_historical;
 use sp_consensus_babe::{AuthorityId, AuthorityPair, Randomness, Slot, VrfSignature};
 use sp_core::{
 	crypto::{Pair, VrfSecret},
-	U256,
+	ConstBool, U256,
 };
 use sp_io;
 use sp_runtime::{
@@ -39,7 +39,7 @@ use sp_runtime::{
 	impl_opaque_keys,
 	testing::{Digest, DigestItem, Header, TestXt},
 	traits::{Header as _, OpaqueKeys},
-	BuildStorage, Perbill,
+	BoundedVec, BuildStorage, Perbill,
 };
 use sp_staking::{EraIndex, SessionIndex};
 
@@ -151,7 +151,9 @@ impl onchain::Config for OnChainSeqPhragmen {
 	type Solver = SequentialPhragmen<DummyValidatorId, Perbill>;
 	type DataProvider = Staking;
 	type WeightInfo = ();
-	type MaxWinners = ConstU32<100>;
+	type MaxWinnersPerPage = ConstU32<100>;
+	type MaxBackersPerWinner = ConstU32<100>;
+	type Sort = ConstBool<true>;
 	type Bounds = ElectionsBounds;
 }
 
@@ -343,7 +345,7 @@ pub fn new_test_ext_raw_authorities(authorities: Vec<AuthorityId>) -> sp_io::Tes
 		validator_count: 8,
 		force_era: pallet_staking::Forcing::ForceNew,
 		minimum_validator_count: 0,
-		invulnerables: vec![],
+		invulnerables: BoundedVec::new(),
 		..Default::default()
 	};
 
diff --git a/substrate/frame/bags-list/remote-tests/src/snapshot.rs b/substrate/frame/bags-list/remote-tests/src/snapshot.rs
index 5f999aa0b8b..f8ba7b8d024 100644
--- a/substrate/frame/bags-list/remote-tests/src/snapshot.rs
+++ b/substrate/frame/bags-list/remote-tests/src/snapshot.rs
@@ -22,7 +22,10 @@ use frame_election_provider_support::{
 };
 use frame_support::traits::PalletInfoAccess;
 use remote_externalities::{Builder, Mode, OnlineConfig};
-use sp_runtime::{traits::Block as BlockT, DeserializeOwned};
+use sp_runtime::{
+	traits::{Block as BlockT, Zero},
+	DeserializeOwned,
+};
 
 /// Execute create a snapshot from pallet-staking.
 pub async fn execute<Runtime, Block>(voter_limit: Option<usize>, currency_unit: u64, ws_url: String)
@@ -70,8 +73,9 @@ where
 			Some(v) => DataProviderBounds { count: Some(CountBound(v as u32)), size: None },
 		};
 
+		// single page voter snapshot, thus page index == 0.
 		let voters =
-			<pallet_staking::Pallet<Runtime> as ElectionDataProvider>::electing_voters(bounds)
+			<pallet_staking::Pallet<Runtime> as ElectionDataProvider>::electing_voters(bounds, Zero::zero())
 				.unwrap();
 
 		let mut voters_nominator_only = voters
diff --git a/substrate/frame/bags-list/src/benchmarks.rs b/substrate/frame/bags-list/src/benchmarks.rs
index 55f4c24835e..7db4c4bb359 100644
--- a/substrate/frame/bags-list/src/benchmarks.rs
+++ b/substrate/frame/bags-list/src/benchmarks.rs
@@ -29,6 +29,125 @@ use frame_system::RawOrigin as SystemOrigin;
 use sp_runtime::traits::One;
 
 benchmarks_instance_pallet! {
+	// iteration of any number of items should only touch that many nodes and bags.
+	#[extra]
+	iter {
+		let n = 100;
+
+		// clear any pre-existing storage.
+		List::<T, _>::unsafe_clear();
+
+		// add n nodes, half to first bag and half to second bag.
+		let bag_thresh = T::BagThresholds::get()[0];
+		let second_bag_thresh = T::BagThresholds::get()[1];
+
+
+		for i in 0..n/2 {
+			let node: T::AccountId = account("node", i, 0);
+			assert_ok!(List::<T, _>::insert(node.clone(), bag_thresh - One::one()));
+		}
+		for i in 0..n/2 {
+			let node: T::AccountId = account("node", i, 1);
+			assert_ok!(List::<T, _>::insert(node.clone(), bag_thresh + One::one()));
+		}
+		assert_eq!(
+			List::<T, _>::get_bags().into_iter().map(|(bag, nodes)| (bag, nodes.len())).collect::<Vec<_>>(),
+			vec![
+				(bag_thresh, (n / 2) as usize),
+				(second_bag_thresh, (n / 2) as usize),
+			]
+		);
+	}: {
+		let voters = List::<T, _>::iter();
+		let len = voters.collect::<Vec<_>>().len();
+		assert!(len as u32 == n, "len is {}, expected {}", len, n);
+	}
+
+	// iteration of any number of items should only touch that many nodes and bags.
+	#[extra]
+	iter_take {
+		let n = 100;
+
+		// clear any pre-existing storage.
+		List::<T, _>::unsafe_clear();
+
+		// add n nodes, half to first bag and half to second bag.
+		let bag_thresh = T::BagThresholds::get()[0];
+		let second_bag_thresh = T::BagThresholds::get()[1];
+
+
+		for i in 0..n/2 {
+			let node: T::AccountId = account("node", i, 0);
+			assert_ok!(List::<T, _>::insert(node.clone(), bag_thresh - One::one()));
+		}
+		for i in 0..n/2 {
+			let node: T::AccountId = account("node", i, 1);
+			assert_ok!(List::<T, _>::insert(node.clone(), bag_thresh + One::one()));
+		}
+		assert_eq!(
+			List::<T, _>::get_bags().into_iter().map(|(bag, nodes)| (bag, nodes.len())).collect::<Vec<_>>(),
+			vec![
+				(bag_thresh, (n / 2) as usize),
+				(second_bag_thresh, (n / 2) as usize),
+			]
+		);
+	}: {
+		// this should only go into one of the bags
+		let voters = List::<T, _>::iter().take(n as usize / 4 );
+		let len = voters.collect::<Vec<_>>().len();
+		assert!(len as u32 == n / 4, "len is {}, expected {}", len, n / 4);
+	}
+
+	#[extra]
+	iter_from {
+		let n = 100;
+
+		// clear any pre-existing storage.
+		List::<T, _>::unsafe_clear();
+
+		// populate the first 4 bags with n/4 nodes each
+		let bag_thresh = T::BagThresholds::get()[0];
+
+		for i in 0..n/4 {
+			let node: T::AccountId = account("node", i, 0);
+			assert_ok!(List::<T, _>::insert(node.clone(), bag_thresh - One::one()));
+		}
+		for i in 0..n/4 {
+			let node: T::AccountId = account("node", i, 1);
+			assert_ok!(List::<T, _>::insert(node.clone(), bag_thresh + One::one()));
+		}
+
+		let bag_thresh = T::BagThresholds::get()[2];
+
+		for i in 0..n/4 {
+			let node: T::AccountId = account("node", i, 2);
+			assert_ok!(List::<T, _>::insert(node.clone(), bag_thresh - One::one()));
+		}
+
+		for i in 0..n/4 {
+			let node: T::AccountId = account("node", i, 3);
+			assert_ok!(List::<T, _>::insert(node.clone(), bag_thresh + One::one()));
+		}
+
+		assert_eq!(
+			List::<T, _>::get_bags().into_iter().map(|(bag, nodes)| (bag, nodes.len())).collect::<Vec<_>>(),
+			vec![
+				(T::BagThresholds::get()[0], (n / 4) as usize),
+				(T::BagThresholds::get()[1], (n / 4) as usize),
+				(T::BagThresholds::get()[2], (n / 4) as usize),
+				(T::BagThresholds::get()[3], (n / 4) as usize),
+			]
+		);
+
+		// iter from someone in the 3rd bag, so this should touch ~75 nodes and 3 bags
+		let from: T::AccountId = account("node", 0, 2);
+	}: {
+		let voters = List::<T, _>::iter_from(&from).unwrap();
+		let len = voters.collect::<Vec<_>>().len();
+		assert!(len as u32 == 74, "len is {}, expected {}", len, 74);
+	}
+
+
 	rebag_non_terminal {
 		// An expensive case for rebag-ing (rebag a non-terminal node):
 		//
diff --git a/substrate/frame/bags-list/src/lib.rs b/substrate/frame/bags-list/src/lib.rs
index ee36a3a3ebd..ae65cc0783c 100644
--- a/substrate/frame/bags-list/src/lib.rs
+++ b/substrate/frame/bags-list/src/lib.rs
@@ -148,7 +148,7 @@ pub use list::{notional_bag_for, Bag, List, ListError, Node};
 pub use pallet::*;
 pub use weights::WeightInfo;
 
-pub(crate) const LOG_TARGET: &str = "runtime::bags_list";
+pub(crate) const LOG_TARGET: &str = "runtime::bags-list";
 
 // syntactic sugar for logging.
 #[macro_export]
diff --git a/substrate/frame/bags-list/src/list/mod.rs b/substrate/frame/bags-list/src/list/mod.rs
index 696b64d40e9..6b0d1afcd8b 100644
--- a/substrate/frame/bags-list/src/list/mod.rs
+++ b/substrate/frame/bags-list/src/list/mod.rs
@@ -245,7 +245,7 @@ impl<T: Config<I>, I: 'static> List<T, I> {
 	/// Iterate over all nodes in all bags in the list.
 	///
 	/// Full iteration can be expensive; it's recommended to limit the number of items with
-	/// `.take(n)`.
+	/// `.take(n)`, or call `.next()` one by one.
 	pub(crate) fn iter() -> impl Iterator<Item = Node<T, I>> {
 		// We need a touch of special handling here: because we permit `T::BagThresholds` to
 		// omit the final bound, we need to ensure that we explicitly include that threshold in the
@@ -292,6 +292,13 @@ impl<T: Config<I>, I: 'static> List<T, I> {
 			.filter_map(Bag::get)
 			.flat_map(|bag| bag.iter());
 
+		crate::log!(
+			debug,
+			"starting to iterate from {:?}, who's bag is {:?}, and there are {:?} leftover bags",
+			&start,
+			start_node_upper,
+			idx
+		);
 		Ok(start_bag.chain(leftover_bags))
 	}
 
@@ -331,7 +338,7 @@ impl<T: Config<I>, I: 'static> List<T, I> {
 		bag.put();
 
 		crate::log!(
-			debug,
+			trace,
 			"inserted {:?} with score {:?} into bag {:?}, new count is {}",
 			id,
 			score,
diff --git a/substrate/frame/beefy/Cargo.toml b/substrate/frame/beefy/Cargo.toml
index b8e952dfbd6..1cb4c41f41b 100644
--- a/substrate/frame/beefy/Cargo.toml
+++ b/substrate/frame/beefy/Cargo.toml
@@ -36,6 +36,7 @@ sp-core = { workspace = true, default-features = true }
 sp-io = { workspace = true, default-features = true }
 sp-staking = { workspace = true, default-features = true }
 sp-state-machine = { workspace = true }
+sp-tracing = { workspace = true, default-features = true }
 
 [features]
 default = ["std"]
diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs
index fc731e3bc50..2f90edf3c35 100644
--- a/substrate/frame/beefy/src/mock.rs
+++ b/substrate/frame/beefy/src/mock.rs
@@ -16,9 +16,6 @@
 // limitations under the License.
 
 use codec::{Decode, Encode};
-use scale_info::TypeInfo;
-use std::vec;
-
 use frame_election_provider_support::{
 	bounds::{ElectionBounds, ElectionBoundsBuilder},
 	onchain, SequentialPhragmen, Weight,
@@ -29,14 +26,15 @@ use frame_support::{
 };
 use frame_system::pallet_prelude::HeaderFor;
 use pallet_session::historical as pallet_session_historical;
-use sp_core::{crypto::KeyTypeId, ConstU128};
+use scale_info::TypeInfo;
+use sp_core::{crypto::KeyTypeId, ConstBool, ConstU128};
 use sp_runtime::{
 	app_crypto::ecdsa::Public,
 	curve::PiecewiseLinear,
 	impl_opaque_keys,
 	testing::TestXt,
 	traits::{Header as HeaderT, OpaqueKeys},
-	BuildStorage, Perbill,
+	BoundedVec, BuildStorage, Perbill,
 };
 use sp_staking::{EraIndex, SessionIndex};
 use sp_state_machine::BasicExternalities;
@@ -237,7 +235,9 @@ impl onchain::Config for OnChainSeqPhragmen {
 	type Solver = SequentialPhragmen<u64, Perbill>;
 	type DataProvider = Staking;
 	type WeightInfo = ();
-	type MaxWinners = ConstU32<100>;
+	type MaxWinnersPerPage = ConstU32<100>;
+	type MaxBackersPerWinner = ConstU32<100>;
+	type Sort = ConstBool<true>;
 	type Bounds = ElectionsBoundsOnChain;
 }
 
@@ -277,6 +277,7 @@ impl ExtBuilder {
 	}
 
 	pub fn build(self) -> sp_io::TestExternalities {
+		sp_tracing::try_init_simple();
 		let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
 
 		let balances: Vec<_> =
@@ -313,7 +314,7 @@ impl ExtBuilder {
 			validator_count: 2,
 			force_era: pallet_staking::Forcing::ForceNew,
 			minimum_validator_count: 0,
-			invulnerables: vec![],
+			invulnerables: BoundedVec::new(),
 			..Default::default()
 		};
 
diff --git a/substrate/frame/beefy/src/tests.rs b/substrate/frame/beefy/src/tests.rs
index 1bd0a72b25e..5f713a41caf 100644
--- a/substrate/frame/beefy/src/tests.rs
+++ b/substrate/frame/beefy/src/tests.rs
@@ -39,6 +39,8 @@ use crate::{self as beefy, mock::*, Call, Config, Error, WeightInfoExt};
 
 fn init_block(block: u64) {
 	System::set_block_number(block);
+	// Staking has to also be initialized, and be the first, to have the new validator set ready.
+	Staking::on_initialize(block);
 	Session::on_initialize(block);
 }
 
diff --git a/substrate/frame/benchmarking/src/lib.rs b/substrate/frame/benchmarking/src/lib.rs
index 6e21356e9d4..0af02ccc1af 100644
--- a/substrate/frame/benchmarking/src/lib.rs
+++ b/substrate/frame/benchmarking/src/lib.rs
@@ -381,7 +381,7 @@ pub use v1::*;
 ///
 ///     #[extrinsic_call]
 ///     _(RuntimeOrigin::Signed(caller), vec![0u8; l]);
-///     
+///
 ///     // Everything onwards will be treated as test.
 ///     assert_last_event::<T>(Event::FooExecuted { result: Ok(()) }.into());
 ///     Ok(())
diff --git a/substrate/frame/delegated-staking/src/mock.rs b/substrate/frame/delegated-staking/src/mock.rs
index 42b876d049a..003d3380f66 100644
--- a/substrate/frame/delegated-staking/src/mock.rs
+++ b/substrate/frame/delegated-staking/src/mock.rs
@@ -24,7 +24,7 @@ use frame_support::{
 	PalletId,
 };
 
-use sp_runtime::{traits::IdentityLookup, BuildStorage, Perbill};
+use sp_runtime::{traits::IdentityLookup, BoundedVec, BuildStorage, Perbill};
 
 use frame_election_provider_support::{
 	bounds::{ElectionBounds, ElectionBoundsBuilder},
@@ -32,7 +32,7 @@ use frame_election_provider_support::{
 };
 use frame_support::dispatch::RawOrigin;
 use pallet_staking::{ActiveEra, ActiveEraInfo, CurrentEra};
-use sp_core::U256;
+use sp_core::{ConstBool, U256};
 use sp_runtime::traits::Convert;
 use sp_staking::{Agent, Stake, StakingInterface};
 
@@ -96,7 +96,9 @@ impl onchain::Config for OnChainSeqPhragmen {
 	type Solver = SequentialPhragmen<Balance, sp_runtime::Perbill>;
 	type DataProvider = Staking;
 	type WeightInfo = ();
-	type MaxWinners = ConstU32<100>;
+	type MaxWinnersPerPage = ConstU32<100>;
+	type MaxBackersPerWinner = ConstU32<100>;
+	type Sort = ConstBool<true>;
 	type Bounds = ElectionsBoundsOnChain;
 }
 
@@ -220,7 +222,7 @@ impl ExtBuilder {
 			// ideal validator count
 			validator_count: 2,
 			minimum_validator_count: 1,
-			invulnerables: vec![],
+			invulnerables: BoundedVec::new(),
 			slash_reward_fraction: Perbill::from_percent(10),
 			min_nominator_bond: ExistentialDeposit::get(),
 			min_validator_bond: ExistentialDeposit::get(),
diff --git a/substrate/frame/election-provider-multi-block/Cargo.toml b/substrate/frame/election-provider-multi-block/Cargo.toml
new file mode 100644
index 00000000000..907523d2883
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/Cargo.toml
@@ -0,0 +1,84 @@
+[package]
+name = "pallet-election-provider-multi-block"
+version = "0.9.0"
+authors.workspace = true
+edition.workspace = true
+license = "Apache-2.0"
+homepage.workspace = true
+repository.workspace = true
+description = "PALLET multi phase+block election providers"
+
+[lints]
+workspace = true
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[dependencies]
+codec = { features = [
+	"derive",
+], workspace = true }
+log = { workspace = true }
+scale-info = { features = [
+	"derive",
+], workspace = true }
+
+frame-election-provider-support = { workspace = true }
+frame-support = { workspace = true }
+frame-system = { workspace = true }
+
+sp-arithmetic = { workspace = true }
+sp-core = { workspace = true }
+sp-io = { workspace = true }
+sp-npos-elections = { workspace = true }
+sp-runtime = { workspace = true }
+sp-std = { workspace = true }
+
+# Optional imports for benchmarking
+frame-benchmarking = { optional = true, workspace = true }
+rand = { features = ["alloc", "small_rng"], optional = true, workspace = true }
+
+[dev-dependencies]
+frame-benchmarking = { workspace = true, default-features = true }
+pallet-balances = { workspace = true, default-features = true }
+parking_lot = { workspace = true, default-features = true }
+sp-core = { workspace = true }
+sp-io = { workspace = true, default-features = true }
+sp-tracing = { workspace = true, default-features = true }
+
+[features]
+default = ["std"]
+std = [
+	"codec/std",
+	"frame-benchmarking?/std",
+	"frame-election-provider-support/std",
+	"frame-support/std",
+	"frame-system/std",
+	"log/std",
+	"pallet-balances/std",
+	"rand/std",
+	"scale-info/std",
+	"sp-arithmetic/std",
+	"sp-core/std",
+	"sp-io/std",
+	"sp-npos-elections/std",
+	"sp-runtime/std",
+	"sp-std/std",
+	"sp-tracing/std",
+]
+runtime-benchmarks = [
+	"frame-benchmarking/runtime-benchmarks",
+	"frame-election-provider-support/runtime-benchmarks",
+	"frame-support/runtime-benchmarks",
+	"frame-system/runtime-benchmarks",
+	"pallet-balances/runtime-benchmarks",
+	"rand",
+	"sp-runtime/runtime-benchmarks",
+]
+try-runtime = [
+	"frame-election-provider-support/try-runtime",
+	"frame-support/try-runtime",
+	"frame-system/try-runtime",
+	"pallet-balances/try-runtime",
+	"sp-runtime/try-runtime",
+]
diff --git a/substrate/frame/election-provider-multi-block/src/benchmarking.rs b/substrate/frame/election-provider-multi-block/src/benchmarking.rs
new file mode 100644
index 00000000000..2af6e6747a7
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/benchmarking.rs
@@ -0,0 +1,170 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::{Config, CurrentPhase, Pallet, Phase, Snapshot};
+use frame_benchmarking::v2::*;
+use frame_election_provider_support::ElectionDataProvider;
+use frame_support::pallet_prelude::*;
+const SNAPSHOT_NOT_BIG_ENOUGH: &'static str = "Snapshot page is not full, you should run this \
+benchmark with enough genesis stakers in staking (DataProvider) to fill a page of voters/targets \
+as per VoterSnapshotPerBlock and TargetSnapshotPerBlock. Generate at least \
+2 * VoterSnapshotPerBlock) nominators and TargetSnapshotPerBlock validators";
+
+#[benchmarks(where T: crate::signed::Config + crate::unsigned::Config + crate::verifier::Config)]
+mod benchmarks {
+	use super::*;
+
+	#[benchmark]
+	fn on_initialize_nothing() -> Result<(), BenchmarkError> {
+		T::DataProvider::set_next_election(Pallet::<T>::reasonable_next_election());
+		assert_eq!(CurrentPhase::<T>::get(), Phase::Off);
+
+		#[block]
+		{
+			Pallet::<T>::roll_next(true, false);
+		}
+
+		assert_eq!(CurrentPhase::<T>::get(), Phase::Off);
+		Ok(())
+	}
+
+	#[benchmark]
+	fn on_initialize_into_snapshot_msp() -> Result<(), BenchmarkError> {
+		assert!(T::Pages::get() >= 2, "this benchmark only works in a runtime with 2 pages or more, set at least `type Pages = 2` for benchmark run");
+		T::DataProvider::set_next_election(Pallet::<T>::reasonable_next_election());
+		// TODO: the results of this benchmark cause too many hits to voters bags list, why???
+
+		// roll to next block until we are about to go into the snapshot.
+		Pallet::<T>::run_until_before_matches(|| {
+			matches!(CurrentPhase::<T>::get(), Phase::Snapshot(_))
+		});
+
+		// since we reverted the last page, we are still in phase Off.
+		assert_eq!(CurrentPhase::<T>::get(), Phase::Off);
+
+		#[block]
+		{
+			Pallet::<T>::roll_next(true, false);
+		}
+
+		assert_eq!(CurrentPhase::<T>::get(), Phase::Snapshot(T::Pages::get() - 1));
+		assert_eq!(
+			Snapshot::<T>::voters_decode_len(T::Pages::get() - 1).unwrap() as u32,
+			T::VoterSnapshotPerBlock::get(),
+			"{}",
+			SNAPSHOT_NOT_BIG_ENOUGH
+		);
+		assert_eq!(
+			Snapshot::<T>::targets_decode_len().unwrap() as u32,
+			T::TargetSnapshotPerBlock::get(),
+			"{}",
+			SNAPSHOT_NOT_BIG_ENOUGH
+		);
+
+		Ok(())
+	}
+
+	#[benchmark]
+	fn on_initialize_into_snapshot_rest() -> Result<(), BenchmarkError> {
+		assert!(T::Pages::get() >= 2, "this benchmark only works in a runtime with 2 pages or more, set at least `type Pages = 2` for benchmark run");
+		T::DataProvider::set_next_election(Pallet::<T>::reasonable_next_election());
+
+		// roll to the first block of the snapshot.
+		Pallet::<T>::roll_until_matches(|| matches!(CurrentPhase::<T>::get(), Phase::Snapshot(_)));
+
+		assert_eq!(CurrentPhase::<T>::get(), Phase::Snapshot(T::Pages::get() - 1));
+
+		// take one more snapshot page.
+		#[block]
+		{
+			Pallet::<T>::roll_next(true, false);
+		}
+
+		assert_eq!(CurrentPhase::<T>::get(), Phase::Snapshot(T::Pages::get() - 2));
+		assert_eq!(
+			Snapshot::<T>::voters_decode_len(T::Pages::get() - 2).unwrap() as u32,
+			T::VoterSnapshotPerBlock::get(),
+			"{}",
+			SNAPSHOT_NOT_BIG_ENOUGH
+		);
+		Ok(())
+	}
+
+	#[benchmark]
+	fn on_initialize_into_signed() -> Result<(), BenchmarkError> {
+		T::DataProvider::set_next_election(Pallet::<T>::reasonable_next_election());
+		Pallet::<T>::run_until_before_matches(|| matches!(CurrentPhase::<T>::get(), Phase::Signed));
+
+		assert_eq!(CurrentPhase::<T>::get(), Phase::Snapshot(0));
+
+		#[block]
+		{
+			Pallet::<T>::roll_next(true, false);
+		}
+
+		assert_eq!(CurrentPhase::<T>::get(), Phase::Signed);
+
+		Ok(())
+	}
+
+	#[benchmark]
+	fn on_initialize_into_signed_validation() -> Result<(), BenchmarkError> {
+		T::DataProvider::set_next_election(Pallet::<T>::reasonable_next_election());
+		Pallet::<T>::run_until_before_matches(|| {
+			matches!(CurrentPhase::<T>::get(), Phase::SignedValidation(_))
+		});
+
+		assert_eq!(CurrentPhase::<T>::get(), Phase::Signed);
+
+		#[block]
+		{
+			Pallet::<T>::roll_next(true, false);
+		}
+
+		Ok(())
+	}
+
+	#[benchmark]
+	fn on_initialize_into_unsigned() -> Result<(), BenchmarkError> {
+		T::DataProvider::set_next_election(Pallet::<T>::reasonable_next_election());
+		Pallet::<T>::run_until_before_matches(|| {
+			matches!(CurrentPhase::<T>::get(), Phase::Unsigned(_))
+		});
+		assert!(matches!(CurrentPhase::<T>::get(), Phase::SignedValidation(_)));
+
+		#[block]
+		{
+			Pallet::<T>::roll_next(true, false);
+		}
+
+		assert!(matches!(CurrentPhase::<T>::get(), Phase::Unsigned(_)));
+		Ok(())
+	}
+
+	#[benchmark]
+	fn manage() -> Result<(), BenchmarkError> {
+		#[block]
+		{}
+		Ok(())
+	}
+
+	impl_benchmark_test_suite!(
+		Pallet,
+		crate::mock::ExtBuilder::full().build_unchecked(),
+		crate::mock::Runtime
+	);
+}
diff --git a/substrate/frame/election-provider-multi-block/src/helpers.rs b/substrate/frame/election-provider-multi-block/src/helpers.rs
new file mode 100644
index 00000000000..20396ac97d2
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/helpers.rs
@@ -0,0 +1,227 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Some helper functions/macros for this crate.
+
+use crate::{
+	types::{PageIndex, VoterOf},
+	unsigned::miner::MinerConfig,
+	AllVoterPagesOf, SolutionTargetIndexOf, SolutionVoterIndexOf, VoteWeight,
+};
+use frame_support::{traits::Get, BoundedVec};
+use sp_runtime::SaturatedConversion;
+use sp_std::{collections::btree_map::BTreeMap, convert::TryInto, prelude::*};
+
+/// Emit a log specific to this pallet, setting the target to [`crate::LOG_PREFIX`]
+#[macro_export]
+macro_rules! log {
+	($level:tt, $pattern:expr $(, $values:expr)* $(,)?) => {
+		log::$level!(
+			target: $crate::LOG_PREFIX,
+			concat!("[#{:?}] 🗳🗳🗳  ", $pattern), <frame_system::Pallet<T>>::block_number() $(, $values)*
+		)
+	};
+}
+
+/// Emit a log within a submodule of the pallet
+#[macro_export]
+macro_rules! sublog {
+	($level:tt, $sub_pallet:tt, $pattern:expr $(, $values:expr)* $(,)?) => {
+		#[cfg(not(feature = "std"))]
+		log!($level, $pattern $(, $values )*);
+		#[cfg(feature = "std")]
+		log::$level!(
+			target: format!("{}::{}", $crate::LOG_PREFIX, $sub_pallet).as_ref(),
+			concat!("[#{:?}] 🗳🗳🗳  ", $pattern), <frame_system::Pallet<T>>::block_number() $(, $values )*
+		)
+	};
+}
+
+/// Emit a log from within the offchain miner.
+#[macro_export]
+macro_rules! miner_log {
+	($level:tt, $pattern:expr $(, $values:expr)* $(,)?) => {
+		log::$level!(
+			target: $crate::LOG_PREFIX,
+			concat!("[⛏️miner] 🗳🗳🗳  ", $pattern) $(, $values)*
+		)
+	};
+}
+
+/// Generate an efficient closure of voters and the page in which they live.
+pub(crate) fn generate_voter_page_fn<T: MinerConfig>(
+	paged_snapshot: &AllVoterPagesOf<T>,
+) -> impl Fn(&T::AccountId) -> Option<PageIndex> {
+	let mut cache: BTreeMap<T::AccountId, PageIndex> = BTreeMap::new();
+	paged_snapshot
+		.iter()
+		.enumerate()
+		.map(|(page, whatever)| (page.saturated_into::<PageIndex>(), whatever))
+		.for_each(|(page, page_voters)| {
+			page_voters.iter().for_each(|(v, _, _)| {
+				let _existed = cache.insert(v.clone(), page);
+				// if a duplicate exists, we only consider the last one. Defensive only, should
+				// never happen.
+				debug_assert!(_existed.is_none());
+			});
+		});
+	move |who| cache.get(who).copied()
+}
+
+/// Generate a btree-map cache of the voters and their indices within the provided `snapshot`.
+///
+/// This does not care about pagination. `snapshot` might be a single page or the entire blob of
+/// voters.
+///
+/// This can be used to efficiently build index getter closures.
+pub(crate) fn generate_voter_cache<T: MinerConfig, AnyBound: Get<u32>>(
+	snapshot: &BoundedVec<VoterOf<T>, AnyBound>,
+) -> BTreeMap<T::AccountId, usize> {
+	let mut cache: BTreeMap<T::AccountId, usize> = BTreeMap::new();
+	snapshot.iter().enumerate().for_each(|(i, (x, _, _))| {
+		let _existed = cache.insert(x.clone(), i);
+		// if a duplicate exists, we only consider the last one. Defensive only, should never
+		// happen.
+		debug_assert!(_existed.is_none());
+	});
+
+	cache
+}
+
+/// Create a function that returns the index of a voter in the snapshot.
+///
+/// Same as [`voter_index_fn`] but the returned function owns all its necessary data; nothing is
+/// borrowed.
+pub(crate) fn voter_index_fn_owned<T: MinerConfig>(
+	cache: BTreeMap<T::AccountId, usize>,
+) -> impl Fn(&T::AccountId) -> Option<SolutionVoterIndexOf<T>> {
+	move |who| {
+		cache
+			.get(who)
+			.and_then(|i| <usize as TryInto<SolutionVoterIndexOf<T>>>::try_into(*i).ok())
+	}
+}
+
+/// Same as [`voter_index_fn`], but the returning index is converted into usize, if possible.
+///
+/// ## Warning
+///
+/// Note that this will represent the snapshot data from which the `cache` is generated.
+pub(crate) fn voter_index_fn_usize<T: MinerConfig>(
+	cache: &BTreeMap<T::AccountId, usize>,
+) -> impl Fn(&T::AccountId) -> Option<usize> + '_ {
+	move |who| cache.get(who).cloned()
+}
+
+/// A non-optimized, linear version of [`voter_index_fn`] that does not need a cache and does a
+/// linear search.
+///
+/// ## Warning
+///
+/// Not meant to be used in production.
+#[cfg(test)]
+pub(crate) fn voter_index_fn_linear<T: MinerConfig>(
+	snapshot: &Vec<VoterOf<T>>,
+) -> impl Fn(&T::AccountId) -> Option<SolutionVoterIndexOf<T>> + '_ {
+	move |who| {
+		snapshot
+			.iter()
+			.position(|(x, _, _)| x == who)
+			.and_then(|i| <usize as TryInto<SolutionVoterIndexOf<T>>>::try_into(i).ok())
+	}
+}
+
+/// Create a function that returns the index of a target in the snapshot.
+///
+/// The returned index type is the same as the one defined in `T::Solution::Target`.
+///
+/// Note: to the extent possible, the returned function should be cached and reused. Producing that
+/// function requires a `O(n log n)` data transform. Each invocation of that function completes
+/// in `O(log n)`.
+pub(crate) fn target_index_fn<T: MinerConfig>(
+	snapshot: &Vec<T::AccountId>,
+) -> impl Fn(&T::AccountId) -> Option<SolutionTargetIndexOf<T>> + '_ {
+	let cache: BTreeMap<_, _> =
+		snapshot.iter().enumerate().map(|(idx, account_id)| (account_id, idx)).collect();
+	move |who| {
+		cache
+			.get(who)
+			.and_then(|i| <usize as TryInto<SolutionTargetIndexOf<T>>>::try_into(*i).ok())
+	}
+}
+
+/// Create a function that returns the index of a target in the snapshot.
+///
+/// The returned index type is the same as the one defined in `T::Solution::Target`.
+///
+/// ## Warning
+///
+/// Not meant to be used in production.
+#[cfg(test)]
+pub(crate) fn target_index_fn_linear<T: MinerConfig>(
+	snapshot: &Vec<T::AccountId>,
+) -> impl Fn(&T::AccountId) -> Option<SolutionTargetIndexOf<T>> + '_ {
+	move |who| {
+		snapshot
+			.iter()
+			.position(|x| x == who)
+			.and_then(|i| <usize as TryInto<SolutionTargetIndexOf<T>>>::try_into(i).ok())
+	}
+}
+
+/// Create a function that can map a voter index ([`SolutionVoterIndexOf`]) to the actual voter
+/// account using a linearly indexable snapshot.
+pub(crate) fn voter_at_fn<T: MinerConfig>(
+	snapshot: &Vec<VoterOf<T>>,
+) -> impl Fn(SolutionVoterIndexOf<T>) -> Option<T::AccountId> + '_ {
+	move |i| {
+		<SolutionVoterIndexOf<T> as TryInto<usize>>::try_into(i)
+			.ok()
+			.and_then(|i| snapshot.get(i).map(|(x, _, _)| x).cloned())
+	}
+}
+
+/// Create a function that can map a target index ([`SolutionTargetIndexOf`]) to the actual target
+/// account using a linearly indexible snapshot.
+pub(crate) fn target_at_fn<T: MinerConfig>(
+	snapshot: &Vec<T::AccountId>,
+) -> impl Fn(SolutionTargetIndexOf<T>) -> Option<T::AccountId> + '_ {
+	move |i| {
+		<SolutionTargetIndexOf<T> as TryInto<usize>>::try_into(i)
+			.ok()
+			.and_then(|i| snapshot.get(i).cloned())
+	}
+}
+
+/// Create a function to get the stake of a voter.
+///
+/// ## Warning
+///
+/// The cache must be derived from the same snapshot. Zero is returned if a voter is
+/// non-existent.
+pub(crate) fn stake_of_fn<'a, T: MinerConfig, AnyBound: Get<u32>>(
+	snapshot: &'a BoundedVec<VoterOf<T>, AnyBound>,
+	cache: &'a BTreeMap<T::AccountId, usize>,
+) -> impl Fn(&T::AccountId) -> VoteWeight + 'a {
+	move |who| {
+		if let Some(index) = cache.get(who) {
+			snapshot.get(*index).map(|(_, x, _)| x).cloned().unwrap_or_default()
+		} else {
+			0
+		}
+	}
+}
diff --git a/substrate/frame/election-provider-multi-block/src/lib.rs b/substrate/frame/election-provider-multi-block/src/lib.rs
new file mode 100644
index 00000000000..355f117bc45
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/lib.rs
@@ -0,0 +1,2556 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! # Multi-phase, multi-block, election provider pallet.
+//!
+//! ## Overall idea
+//!
+//! `pallet_election_provider_multi_phase` provides the basic ability for NPoS solutions to be
+//! computed offchain (essentially anywhere) and submitted back to the chain as signed or unsigned
+//! transaction, with sensible configurations and fail-safe mechanisms to ensure system safety.
+//! Nonetheless, it has a limited capacity in terms of number of voters it can process in a **single
+//! block**.
+//!
+//! This pallet takes `pallet_election_provider_multi_phase`, keeps most of its ideas and core
+//! premises, and extends it to support paginated, multi-block operations. The final goal of this
+//! pallet is scale linearly with the number of blocks allocated to the elections. Moreover, the
+//! amount of work that it does in one block should be bounded and measurable, making it suitable
+//! for a parachain. In principle, with large enough blocks (in a dedicated parachain), the number
+//! of voters included in the NPoS system can grow significantly (yet, obviously not indefinitely).
+//!
+//! Note that this pallet does not consider how the recipient is processing the results. To ensure
+//! scalability, of course, the recipient of this pallet's data (i.e. `pallet-staking`) must also be
+//! capable of pagination and multi-block processing.
+//!
+//! ## Companion pallets
+//!
+//! This pallet is essentially hierarchical. This particular one is the top level one. It contains
+//! the shared information that all child pallets use. All child pallets depend on the top level
+//! pallet ONLY, but not the other way around. For those cases, traits are used.
+//!
+//! This pallet will only function in a sensible way if it is peered with its companion pallets.
+//!
+//! - The [`verifier`] pallet provides a standard implementation of the [`verifier::Verifier`]. This
+//!   pallet is mandatory.
+//! - The [`unsigned`] module provides the implementation of unsigned submission by validators. If
+//!   this pallet is included, then [`Config::UnsignedPhase`] will determine its duration.
+//! - The [`signed`] module provides the implementation of the signed submission by any account. If
+//!   this pallet is included, the combined [`Config::SignedPhase`] and
+//!   [`Config::SignedValidationPhase`] will determine its duration
+//!
+//! ### Pallet Ordering:
+//!
+//! The ordering of these pallets in a runtime should be:
+//! 1. parent
+//! 2. verifier
+//! 3. signed
+//! 4. unsigned
+//!
+//! This is critical for the phase transition to work.
+//!
+//! This should be manually checked, there is no automated way to test it.
+//!
+//! ## Pagination
+//!
+//! Most of the external APIs of this pallet are paginated. All pagination follows a pattern where, if
+//! `N` pages exist, the first paginated call is `function(N-1)` and the last one is `function(0)`.
+//! For example, with 3 pages, the `elect` of [`ElectionProvider`] is expected to be called as
+//! `elect(2) -> elect(1) -> elect(0)`. In essence, calling a paginated function with index 0 is
+//! always a signal of termination, meaning that no further calls will follow.
+//!
+//! ## Phases
+//!
+//! The timeline of pallet is overall as follows:
+//!
+//! ```ignore
+//!  <  Off  >
+//! 0 ------- 12 13 14 15 ----------- 20 ---------25 ------- 30
+//! 	           |       |              |            |          |
+//! 	     Snapshot      Signed   SignedValidation  Unsigned   Elect
+//! ```
+//!
+//! * Duration of `Snapshot` is determined by [`Config::Pages`].
+//! * Duration of `Signed`, `SignedValidation` and `Unsigned` are determined by
+//!   [`Config::SignedPhase`], [`Config::SignedValidationPhase`] and [`Config::UnsignedPhase`]
+//!   respectively.
+//! * [`Config::Pages`] calls to elect are expected, but all in all the pallet will close a round
+//!   once `elect(0)` is called.
+//! * The pallet strives to be ready for the first call to `elect`, for example `elect(2)` if 3
+//!   pages.
+//! * This pallet can be commanded to be ready sooner with [`Config::Lookahead`].
+//!
+//! > Given this, it is rather important for the user of this pallet to ensure it always terminates
+//! > election via `elect` before requesting a new one.
+//!
+//! ## Feasible Solution (correct solution)
+//!
+//! All submissions must undergo a feasibility check. Signed solutions are checked one by one at the
+//! end of the signed phase, and the unsigned solutions are checked on the spot. A feasible solution
+//! is as follows:
+//!
+//! 0. **all** of the used indices must be correct.
+//! 1. present *exactly* correct number of winners.
+//! 2. any assignment is checked to match with `PagedVoterSnapshot`.
+//! 3. the claimed score is valid, based on the fixed point arithmetic accuracy.
+//!
+//! ### Emergency Phase and Fallback
+//!
+//! * [`Config::Fallback`] is called on each page. It typically may decide to:
+//!
+//! 1. Do nothing,
+//! 2. Force us into the emergency phase
+//! 3. Compute an onchain solution from the given page of the snapshot. Note that this will be sub-optimal,
+//!    because the proper pagination size of snapshot and fallback will likely differ a lot.
+//!
+//! Note that configuring the fallback to be onchain computation is not recommended, unless for
+//! test-nets for a number of reasons:
+//!
+//! 1. The solution score of the fallback is never checked to match the "minimum" score. That being
+//!    said, the computation happens onchain so we can trust it.
+//! 2. The onchain fallback runs on the same number of voters and targets that reside on a single
+//!    page of a snapshot, which will very likely be too much for actual onchain computation. Yet,
+//!    we don't have another choice as we cannot request another smaller snapshot from the data
+//!    provider mid-election without more bookkeeping on the staking side.
+//!
+//! If onchain solution is to be seriously considered, an improvement to this pallet should
+//! re-request a smaller set of voters from `T::DataProvider` in a stateless manner.
+//!
+//! ### Signed Phase
+//!
+//! Signed phase is when an offchain miner, aka, `polkadot-staking-miner` should operate upon. See
+//! [`signed`] for more information.
+//!
+//! ## Unsigned Phase
+//!
+//! Unsigned phase is a built-in fallback in which validators may submit a single page election,
+//! taking into account only the [`ElectionProvider::msp`] (_most significant page_). See
+//! [`crate::unsigned`] for more information.
+
+// Implementation notes:
+//
+// - Naming convention is: `${singular}_page` for singular, e.g. `voter_page` for `Vec<Voter>`.
+//   `paged_${plural}` for plural, e.g. `paged_voters` for `Vec<Vec<Voter>>`.
+//
+// - Since this crate has multiple `Pallet` and `Configs`, in each sub-pallet, we only reference the
+//   local `Pallet` without a prefix and allow it to be imported via `use`. Avoid `super::Pallet`
+//   except for the case of a module that wants to reference its local `Pallet`. The
+//   `crate::Pallet` is always reserved for the parent pallet. Other sibling pallets must be
+//   referenced with full path, e.g. `crate::Verifier::Pallet`. Do NOT write something like `use
+//   unsigned::Pallet as UnsignedPallet`.
+//
+// - Respecting private storage items with wrapper We move all implementations out of the `mod
+//   pallet` as much as possible to ensure we NEVER access the internal storage items directly. All
+//   operations should happen with the wrapper types.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+use crate::types::*;
+use codec::{Decode, Encode, MaxEncodedLen};
+use frame_election_provider_support::{
+	onchain, BoundedSupportsOf, DataProviderBounds, ElectionDataProvider, ElectionProvider,
+	InstantElectionProvider,
+};
+use frame_support::{
+	pallet_prelude::*,
+	traits::{Defensive, EnsureOrigin},
+	DebugNoBound, Twox64Concat,
+};
+use frame_system::pallet_prelude::*;
+use scale_info::TypeInfo;
+use sp_arithmetic::{
+	traits::{CheckedAdd, Zero},
+	PerThing, UpperOf,
+};
+use sp_npos_elections::VoteWeight;
+use sp_runtime::{
+	traits::{Hash, Saturating},
+	SaturatedConversion,
+};
+use sp_std::{borrow::ToOwned, boxed::Box, prelude::*};
+use verifier::Verifier;
+
+#[cfg(test)]
+mod mock;
+#[macro_use]
+pub mod helpers;
+#[cfg(feature = "runtime-benchmarks")]
+pub mod benchmarking;
+
+/// The common logginv prefix of all pallets in this crate.
+pub const LOG_PREFIX: &'static str = "runtime::multiblock-election";
+
+macro_rules! clear_paged_map {
+	($map: ty) => {{
+		let __r = <$map>::clear(u32::MAX, None);
+		debug_assert!(__r.unique <= T::Pages::get(), "clearing map caused too many removals")
+	}};
+}
+
+/// The signed pallet
+pub mod signed;
+/// Common types of the pallet
+pub mod types;
+/// The unsigned pallet
+pub mod unsigned;
+/// The verifier pallet
+pub mod verifier;
+/// The weight module
+pub mod weights;
+
+pub use pallet::*;
+pub use types::*;
+pub use weights::measured::pallet_election_provider_multi_block::WeightInfo;
+
+/// A fallback implementation that transitions the pallet to the emergency phase.
+pub struct InitiateEmergencyPhase<T>(sp_std::marker::PhantomData<T>);
+impl<T: Config> ElectionProvider for InitiateEmergencyPhase<T> {
+	type AccountId = T::AccountId;
+	type BlockNumber = BlockNumberFor<T>;
+	type DataProvider = T::DataProvider;
+	type Error = &'static str;
+	type Pages = T::Pages;
+	type MaxBackersPerWinner = <T::Verifier as Verifier>::MaxBackersPerWinner;
+	type MaxWinnersPerPage = <T::Verifier as Verifier>::MaxWinnersPerPage;
+
+	fn elect(_page: PageIndex) -> Result<BoundedSupportsOf<Self>, Self::Error> {
+		Pallet::<T>::phase_transition(Phase::Emergency);
+		Err("Emergency phase started.")
+	}
+
+	fn ongoing() -> bool {
+		false
+	}
+}
+
+impl<T: Config> InstantElectionProvider for InitiateEmergencyPhase<T> {
+	fn instant_elect(
+		_voters: Vec<VoterOf<T::MinerConfig>>,
+		_targets: Vec<Self::AccountId>,
+		_desired_targets: u32,
+	) -> Result<BoundedSupportsOf<Self>, Self::Error> {
+		Self::elect(0)
+	}
+
+	fn bother() -> bool {
+		false
+	}
+}
+
+/// A fallback implementation that silently continues into the next page.
+///
+/// This is suitable for onchain usage.
+pub struct Continue<T>(sp_std::marker::PhantomData<T>);
+impl<T: Config> ElectionProvider for Continue<T> {
+	type AccountId = T::AccountId;
+	type BlockNumber = BlockNumberFor<T>;
+	type DataProvider = T::DataProvider;
+	type Error = &'static str;
+	type Pages = T::Pages;
+	type MaxBackersPerWinner = <T::Verifier as Verifier>::MaxBackersPerWinner;
+	type MaxWinnersPerPage = <T::Verifier as Verifier>::MaxWinnersPerPage;
+
+	fn elect(_page: PageIndex) -> Result<BoundedSupportsOf<Self>, Self::Error> {
+		log!(warn, "'Continue' fallback will do nothing");
+		Err("'Continue' fallback will do nothing")
+	}
+
+	fn ongoing() -> bool {
+		false
+	}
+}
+
+impl<T: Config> InstantElectionProvider for Continue<T> {
+	fn instant_elect(
+		_voters: Vec<VoterOf<T::MinerConfig>>,
+		_targets: Vec<Self::AccountId>,
+		_desired_targets: u32,
+	) -> Result<BoundedSupportsOf<Self>, Self::Error> {
+		Self::elect(0)
+	}
+
+	fn bother() -> bool {
+		false
+	}
+}
+
+/// Internal errors of the pallet. This is used in the implementation of [`ElectionProvider`].
+///
+/// Note that this is different from [`pallet::Error`].
+#[derive(
+	frame_support::DebugNoBound, frame_support::PartialEqNoBound, frame_support::EqNoBound,
+)]
+pub enum ElectionError<T: Config> {
+	/// An error happened in the feasibility check sub-system.
+	Feasibility(verifier::FeasibilityError),
+	/// An error in the fallback.
+	Fallback(FallbackErrorOf<T>),
+	/// An error in the onchain seq-phragmen implementation
+	OnChain(onchain::Error),
+	/// An error happened in the data provider.
+	DataProvider(&'static str),
+	/// the corresponding page in the queued supports is not available.
+	SupportPageNotAvailable,
+	/// The election is not ongoing and therefore no results may be queried.
+	NotOngoing,
+	/// Other misc error
+	Other(&'static str),
+}
+
+impl<T: Config> From<onchain::Error> for ElectionError<T> {
+	fn from(e: onchain::Error) -> Self {
+		ElectionError::OnChain(e)
+	}
+}
+
+impl<T: Config> From<verifier::FeasibilityError> for ElectionError<T> {
+	fn from(e: verifier::FeasibilityError) -> Self {
+		ElectionError::Feasibility(e)
+	}
+}
+
+/// Different operations that the [`Config::AdminOrigin`] can perform on the pallet.
+#[derive(
+	Encode, Decode, MaxEncodedLen, TypeInfo, DebugNoBound, CloneNoBound, PartialEqNoBound, EqNoBound,
+)]
+#[codec(mel_bound(T: Config))]
+#[scale_info(skip_type_params(T))]
+pub enum AdminOperation<T: Config> {
+	/// Forcefully go to the next round, starting from the Off Phase.
+	ForceRotateRound,
+	/// Force-set the phase to the given phase.
+	///
+	/// This can have many many combinations, use only with care and sufficient testing.
+	ForceSetPhase(Phase<BlockNumberFor<T>>),
+	/// Set the given (single page) emergency solution.
+	///
+	/// Can only be called in emergency phase.
+	EmergencySetSolution(Box<BoundedSupportsOf<Pallet<T>>>, ElectionScore),
+	/// Trigger the (single page) fallback in `instant` mode, with the given parameters, and
+	/// queue it if correct.
+	///
+	/// Can only be called in emergency phase.
+	EmergencyFallback,
+	/// Set the minimum untrusted score. This is directly communicated to the verifier component to
+	/// be taken into account.
+	///
+	/// This is useful in preventing any serious issue where due to a bug we accept a very bad
+	/// solution.
+	SetMinUntrustedScore(ElectionScore),
+}
+
+#[frame_support::pallet]
+pub mod pallet {
+	use super::*;
+	#[pallet::config]
+	pub trait Config: frame_system::Config {
+		/// The overarching runtime event type.
+		type RuntimeEvent: From<Event<Self>>
+			+ IsType<<Self as frame_system::Config>::RuntimeEvent>
+			+ TryInto<Event<Self>>;
+
+		/// Duration of the unsigned phase.
+		#[pallet::constant]
+		type UnsignedPhase: Get<BlockNumberFor<Self>>;
+		/// Duration of the signed phase.
+		#[pallet::constant]
+		type SignedPhase: Get<BlockNumberFor<Self>>;
+		/// Duration of the signed validation phase.
+		///
+		/// The duration of this should not be less than `T::Pages`, and there is no point in it
+		/// being more than `SignedPhase::MaxSubmission::get() * T::Pages`. TODO: integrity test for
+		/// it.
+		#[pallet::constant]
+		type SignedValidationPhase: Get<BlockNumberFor<Self>>;
+
+		/// The number of snapshot voters to fetch per block.
+		#[pallet::constant]
+		type VoterSnapshotPerBlock: Get<u32>;
+
+		/// The number of snapshot targets to fetch per block.
+		#[pallet::constant]
+		type TargetSnapshotPerBlock: Get<u32>;
+
+		/// The number of pages.
+		///
+		/// The snapshot is created with this many keys in the storage map.
+		///
+		/// The solutions may contain at MOST this many pages, but less pages are acceptable as
+		/// well.
+		#[pallet::constant]
+		type Pages: Get<PageIndex>;
+
+		/// Something that will provide the election data.
+		type DataProvider: ElectionDataProvider<
+			AccountId = Self::AccountId,
+			BlockNumber = BlockNumberFor<Self>,
+		>;
+
+		/// The miner configuration.
+		///
+		/// These configurations are passed to [`crate::unsigned::miner::BaseMiner`]. An external
+		/// miner implementation should implement this trait, and use the said `BaseMiner`.
+		type MinerConfig: crate::unsigned::miner::MinerConfig<
+			Pages = Self::Pages,
+			AccountId = <Self as frame_system::Config>::AccountId,
+			MaxVotesPerVoter = <Self::DataProvider as ElectionDataProvider>::MaxVotesPerVoter,
+			VoterSnapshotPerBlock = Self::VoterSnapshotPerBlock,
+			TargetSnapshotPerBlock = Self::TargetSnapshotPerBlock,
+			MaxBackersPerWinner = <Self::Verifier as verifier::Verifier>::MaxBackersPerWinner,
+			MaxWinnersPerPage = <Self::Verifier as verifier::Verifier>::MaxWinnersPerPage,
+		>;
+
+		/// The fallback type used for the election.
+		type Fallback: InstantElectionProvider<
+			AccountId = Self::AccountId,
+			BlockNumber = BlockNumberFor<Self>,
+			DataProvider = Self::DataProvider,
+			MaxBackersPerWinner = <Self::Verifier as verifier::Verifier>::MaxBackersPerWinner,
+			MaxWinnersPerPage = <Self::Verifier as verifier::Verifier>::MaxWinnersPerPage,
+		>;
+
+		/// The verifier pallet's interface.
+		type Verifier: verifier::Verifier<
+				Solution = SolutionOf<Self::MinerConfig>,
+				AccountId = Self::AccountId,
+			> + verifier::AsynchronousVerifier;
+
+		/// The number of blocks ahead of time to try and have the election results ready by.
+		type Lookahead: Get<BlockNumberFor<Self>>;
+
+		/// The origin that can perform administration operations on this pallet.
+		type AdminOrigin: EnsureOrigin<Self::RuntimeOrigin>;
+
+		/// The weight of the pallet.
+		type WeightInfo: WeightInfo;
+	}
+
+	#[pallet::call]
+	impl<T: Config> Pallet<T> {
+		/// Manage this pallet.
+		///
+		/// The origin of this call must be [`Config::AdminOrigin`].
+		///
+		/// See [`AdminOperation`] for various operations that are possible.
+		#[pallet::weight(T::WeightInfo::manage())]
+		#[pallet::call_index(0)]
+		pub fn manage(origin: OriginFor<T>, op: AdminOperation<T>) -> DispatchResultWithPostInfo {
+			use crate::verifier::Verifier;
+			use sp_npos_elections::EvaluateSupport;
+
+			let _ = T::AdminOrigin::ensure_origin(origin);
+			match op {
+				AdminOperation::EmergencyFallback => {
+					ensure!(Self::current_phase() == Phase::Emergency, Error::<T>::UnexpectedPhase);
+					// note: for now we run this on the msp, but we can make it configurable if need
+					// be.
+					let voters = Snapshot::<T>::voters(Self::msp()).ok_or(Error::<T>::Snapshot)?;
+					let targets = Snapshot::<T>::targets().ok_or(Error::<T>::Snapshot)?;
+					let desired_targets =
+						Snapshot::<T>::desired_targets().ok_or(Error::<T>::Snapshot)?;
+					let fallback = T::Fallback::instant_elect(
+						voters.into_inner(),
+						targets.into_inner(),
+						desired_targets,
+					)
+					.map_err(|e| {
+						log!(warn, "Fallback failed: {:?}", e);
+						Error::<T>::Fallback
+					})?;
+					let score = fallback.evaluate();
+					T::Verifier::force_set_single_page_valid(fallback, 0, score);
+					Ok(().into())
+				},
+				AdminOperation::EmergencySetSolution(supports, score) => {
+					ensure!(Self::current_phase() == Phase::Emergency, Error::<T>::UnexpectedPhase);
+					T::Verifier::force_set_single_page_valid(*supports, 0, score);
+					Ok(().into())
+				},
+				AdminOperation::ForceSetPhase(phase) => {
+					Self::phase_transition(phase);
+					Ok(().into())
+				},
+				AdminOperation::ForceRotateRound => {
+					Self::rotate_round();
+					Ok(().into())
+				},
+				AdminOperation::SetMinUntrustedScore(score) => {
+					T::Verifier::set_minimum_score(score);
+					Ok(().into())
+				},
+			}
+		}
+	}
+
+	#[pallet::hooks]
+	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
+		fn on_initialize(now: BlockNumberFor<T>) -> Weight {
+			// first, calculate the main phase switches thresholds.
+			let unsigned_deadline = T::UnsignedPhase::get();
+			let signed_validation_deadline =
+				T::SignedValidationPhase::get().saturating_add(unsigned_deadline);
+			let signed_deadline = T::SignedPhase::get().saturating_add(signed_validation_deadline);
+			let snapshot_deadline = signed_deadline.saturating_add(T::Pages::get().into());
+
+			let next_election = T::DataProvider::next_election_prediction(now)
+				.saturating_sub(T::Lookahead::get())
+				.max(now);
+			let remaining_blocks = next_election.saturating_sub(now);
+			let current_phase = Self::current_phase();
+
+			log!(
+				trace,
+				"current phase {:?}, next election {:?}, remaining: {:?}, deadlines: [snapshot {:?}, signed {:?}, signed_validation {:?}, unsigned {:?}]",
+				current_phase,
+				next_election,
+				remaining_blocks,
+				snapshot_deadline,
+				signed_deadline,
+				signed_validation_deadline,
+				unsigned_deadline,
+			);
+
+			match current_phase {
+				// start and continue snapshot.
+				Phase::Off if remaining_blocks <= snapshot_deadline => {
+					let remaining_pages = Self::msp();
+					Self::create_targets_snapshot().defensive_unwrap_or_default();
+					Self::create_voters_snapshot_paged(remaining_pages)
+						.defensive_unwrap_or_default();
+					Self::phase_transition(Phase::Snapshot(remaining_pages));
+					T::WeightInfo::on_initialize_into_snapshot_msp()
+				},
+				Phase::Snapshot(x) if x > 0 => {
+					// we don't check block numbers here, snapshot creation is mandatory.
+					let remaining_pages = x.saturating_sub(1);
+					Self::create_voters_snapshot_paged(remaining_pages).unwrap();
+					Self::phase_transition(Phase::Snapshot(remaining_pages));
+					T::WeightInfo::on_initialize_into_snapshot_rest()
+				},
+
+				// start signed.
+				Phase::Snapshot(0)
+					if remaining_blocks <= signed_deadline &&
+						remaining_blocks > signed_validation_deadline =>
+				{
+					// NOTE: if signed-phase length is zero, second part of the if-condition fails.
+					// TODO: even though we have the integrity test, what if we open the signed
+					// phase, and there's not enough blocks to finalize it? that can happen under
+					// any circumstance and we should deal with it.
+					Self::phase_transition(Phase::Signed);
+					T::WeightInfo::on_initialize_into_signed()
+				},
+
+				// start signed verification.
+				Phase::Signed
+					if remaining_blocks <= signed_validation_deadline &&
+						remaining_blocks > unsigned_deadline =>
+				{
+					// Start verification of the signed stuff.
+					Self::phase_transition(Phase::SignedValidation(now));
+					// we don't do anything else here. We expect the signed sub-pallet to handle
+					// whatever else needs to be done.
+					T::WeightInfo::on_initialize_into_signed_validation()
+				},
+
+				// start unsigned
+				Phase::Signed | Phase::SignedValidation(_) | Phase::Snapshot(0)
+					if remaining_blocks <= unsigned_deadline && remaining_blocks > Zero::zero() =>
+				{
+					Self::phase_transition(Phase::Unsigned(now));
+					T::WeightInfo::on_initialize_into_unsigned()
+				},
+				_ => T::WeightInfo::on_initialize_nothing(),
+			}
+		}
+
+		fn integrity_test() {
+			use sp_std::mem::size_of;
+			// The index type of both voters and targets need to be smaller than that of usize (very
+			// unlikely to be the case, but anyhow).
+			assert!(size_of::<SolutionVoterIndexOf<T::MinerConfig>>() <= size_of::<usize>());
+			assert!(size_of::<SolutionTargetIndexOf<T::MinerConfig>>() <= size_of::<usize>());
+
+			// also, because `VoterSnapshotPerBlock` and `TargetSnapshotPerBlock` are in u32, we
+			// assert that both of these types are smaller than u32 as well.
+			assert!(size_of::<SolutionVoterIndexOf<T::MinerConfig>>() <= size_of::<u32>());
+			assert!(size_of::<SolutionTargetIndexOf<T::MinerConfig>>() <= size_of::<u32>());
+
+			let pages_bn: BlockNumberFor<T> = T::Pages::get().into();
+			// pages must be at least 1.
+			assert!(T::Pages::get() > 0);
+
+			// pages + the amount of Lookahead that we expect shall not be more than the length of
+			// any phase.
+			let lookahead = T::Lookahead::get();
+			assert!(pages_bn + lookahead < T::SignedPhase::get());
+			assert!(pages_bn + lookahead < T::UnsignedPhase::get());
+
+			// Based on the requirements of [`sp_npos_elections::Assignment::try_normalize`].
+			let max_vote: usize = <SolutionOf<T::MinerConfig> as NposSolution>::LIMIT;
+
+			// 2. Maximum sum of [SolutionAccuracy; 16] must fit into `UpperOf<OffchainAccuracy>`.
+			let maximum_chain_accuracy: Vec<UpperOf<SolutionAccuracyOf<T::MinerConfig>>> = (0..
+				max_vote)
+				.map(|_| {
+					<UpperOf<SolutionAccuracyOf<T::MinerConfig>>>::from(
+						<SolutionAccuracyOf<T::MinerConfig>>::one().deconstruct(),
+					)
+				})
+				.collect();
+			let _: UpperOf<SolutionAccuracyOf<T::MinerConfig>> = maximum_chain_accuracy
+				.iter()
+				.fold(Zero::zero(), |acc, x| acc.checked_add(x).unwrap());
+
+			// We only accept data provider who's maximum votes per voter matches our
+			// `T::Solution`'s `LIMIT`.
+			//
+			// NOTE that this pallet does not really need to enforce this in runtime. The
+			// solution cannot represent any voters more than `LIMIT` anyhow.
+			assert_eq!(
+				<T::DataProvider as ElectionDataProvider>::MaxVotesPerVoter::get(),
+				<SolutionOf<T::MinerConfig> as NposSolution>::LIMIT as u32,
+			);
+
+			// The duration of the signed validation phase should be such that at least one solution
+			// can be verified.
+			assert!(
+				T::SignedValidationPhase::get() >= T::Pages::get().into(),
+				"signed validation phase should be at least as long as the number of pages."
+			);
+		}
+
+		#[cfg(feature = "try-runtime")]
+		fn try_state(now: BlockNumberFor<T>) -> Result<(), sp_runtime::TryRuntimeError> {
+			Self::do_try_state(now).map_err(Into::into)
+		}
+	}
+
+	#[pallet::event]
+	#[pallet::generate_deposit(pub(super) fn deposit_event)]
+	pub enum Event<T: Config> {
+		/// A phase transition happened. Only checks major changes in the variants, not minor inner
+		/// values.
+		PhaseTransitioned {
+			/// the source phase
+			from: Phase<BlockNumberFor<T>>,
+			/// The target phase
+			to: Phase<BlockNumberFor<T>>,
+		},
+	}
+
+	/// Error of the pallet that can be returned in response to dispatches.
+	#[pallet::error]
+	pub enum Error<T> {
+		/// Triggering the `Fallback` failed.
+		Fallback,
+		/// Unexpected phase
+		UnexpectedPhase,
+		/// Snapshot was unavailable.
+		Snapshot,
+	}
+
+	/// Common errors in all sub-pallets and miner.
+	#[derive(PartialEq, Eq, Clone, Encode, Decode, Debug)]
+	pub enum CommonError {
+		/// Submission is too early (or too late, depending on your point of reference).
+		EarlySubmission,
+		/// The round counter is wrong.
+		WrongRound,
+		/// Submission is too weak to be considered an improvement.
+		WeakSubmission,
+		/// Wrong number of pages in the solution.
+		WrongPageCount,
+		/// Wrong number of winners presented.
+		WrongWinnerCount,
+		/// The snapshot fingerprint is not a match. The solution is likely outdated.
+		WrongFingerprint,
+		/// Snapshot was not available.
+		Snapshot,
+	}
+
+	/// Internal counter for the number of rounds.
+	///
+	/// This is useful for de-duplication of transactions submitted to the pool, and general
+	/// diagnostics of the pallet.
+	///
+	/// This is merely incremented once per every time that an upstream `elect` is called.
+	#[pallet::storage]
+	#[pallet::getter(fn round)]
+	pub type Round<T: Config> = StorageValue<_, u32, ValueQuery>;
+
+	/// Current phase.
+	#[pallet::storage]
+	#[pallet::getter(fn current_phase)]
+	pub type CurrentPhase<T: Config> = StorageValue<_, Phase<BlockNumberFor<T>>, ValueQuery>;
+
+	/// Wrapper struct for working with snapshots.
+	///
+	/// It manages the following storage items:
+	///
+	/// - `DesiredTargets`: The number of targets that we wish to collect.
+	/// - `PagedVoterSnapshot`: Paginated map of voters.
+	/// - `PagedVoterSnapshotHash`: Hash of the aforementioned.
+	/// - `PagedTargetSnapshot`: Paginated map of targets.
+	/// - `PagedTargetSnapshotHash`: Hash of the aforementioned.
+	///
+	/// ### Invariants
+	///
+	/// The following invariants must be met at **all times** for this storage item to be "correct".
+	///
+	/// - `PagedVoterSnapshotHash` must always contain the same number of keys, and the
+	///   corresponding hash of the `PagedVoterSnapshot`.
+	/// - `PagedTargetSnapshotHash` must always contain the same number of keys, and the
+	///   corresponding hash of the `PagedTargetSnapshot`.
+	///
+	/// - If any page from the paged voters/targets exists, then the aforementioned (desired
+	///   targets) must also exist.
+	///
+	/// The following invariants might need to hold based on the current phase.
+	///
+	///   - If `Phase` IS `Snapshot(_)`, then partial voter/target pages must exist from `msp` to
+	///     `lsp` based on the inner value.
+	///   - If `Phase` IS `Off`, then, no snapshot must exist.
+	///   - In all other phases, the snapshot must FULLY exist.
+	pub(crate) struct Snapshot<T>(sp_std::marker::PhantomData<T>);
+	impl<T: Config> Snapshot<T> {
+		// ----------- mutable methods
+		pub(crate) fn set_desired_targets(d: u32) {
+			DesiredTargets::<T>::put(d);
+		}
+
+		pub(crate) fn set_targets(targets: BoundedVec<T::AccountId, T::TargetSnapshotPerBlock>) {
+			let hash = Self::write_storage_with_pre_allocate(
+				&PagedTargetSnapshot::<T>::hashed_key_for(Pallet::<T>::msp()),
+				targets,
+			);
+			PagedTargetSnapshotHash::<T>::insert(Pallet::<T>::msp(), hash);
+		}
+
+		pub(crate) fn set_voters(page: PageIndex, voters: VoterPageOf<T::MinerConfig>) {
+			let hash = Self::write_storage_with_pre_allocate(
+				&PagedVoterSnapshot::<T>::hashed_key_for(page),
+				voters,
+			);
+			PagedVoterSnapshotHash::<T>::insert(page, hash);
+		}
+
+		/// Destroy the entire snapshot.
+		///
+		/// Should be called only once we transition to [`Phase::Off`].
+		pub(crate) fn kill() {
+			DesiredTargets::<T>::kill();
+			clear_paged_map!(PagedVoterSnapshot::<T>);
+			clear_paged_map!(PagedVoterSnapshotHash::<T>);
+			clear_paged_map!(PagedTargetSnapshot::<T>);
+			clear_paged_map!(PagedTargetSnapshotHash::<T>);
+		}
+
+		// ----------- non-mutables
+		pub(crate) fn desired_targets() -> Option<u32> {
+			DesiredTargets::<T>::get()
+		}
+
+		pub(crate) fn voters(page: PageIndex) -> Option<VoterPageOf<T::MinerConfig>> {
+			PagedVoterSnapshot::<T>::get(page)
+		}
+
+		pub(crate) fn targets() -> Option<BoundedVec<T::AccountId, T::TargetSnapshotPerBlock>> {
+			// NOTE: targets always have one index, which is 0, aka lsp.
+			PagedTargetSnapshot::<T>::get(Pallet::<T>::msp())
+		}
+
+		/// Get a fingerprint of the snapshot, from all the hashes that are stored for each page of
+		/// the snapshot.
+		///
+		/// This is computed as: `(target_hash, voter_hash_n, voter_hash_(n-1), ..., voter_hash_0)`
+		/// where `n` is `T::Pages - 1`. In other words, it is the concatenated hash of targets, and
+		/// voters, from `msp` to `lsp`.
+		pub fn fingerprint() -> T::Hash {
+			let mut hashed_target_and_voters =
+				Self::targets_hash().unwrap_or_default().as_ref().to_vec();
+			let hashed_voters = (Pallet::<T>::msp()..=Pallet::<T>::lsp())
+				.map(|i| PagedVoterSnapshotHash::<T>::get(i).unwrap_or_default())
+				.flat_map(|hash| <T::Hash as AsRef<[u8]>>::as_ref(&hash).to_owned())
+				.collect::<Vec<u8>>();
+			hashed_target_and_voters.extend(hashed_voters);
+			T::Hashing::hash(&hashed_target_and_voters)
+		}
+
+		fn write_storage_with_pre_allocate<E: Encode>(key: &[u8], data: E) -> T::Hash {
+			let size = data.encoded_size();
+			let mut buffer = Vec::with_capacity(size);
+			data.encode_to(&mut buffer);
+
+			let hash = T::Hashing::hash(&buffer);
+
+			// do some checks.
+			debug_assert_eq!(buffer, data.encode());
+			// buffer should have not re-allocated since.
+			debug_assert!(buffer.len() == size && size == buffer.capacity());
+			sp_io::storage::set(key, &buffer);
+
+			hash
+		}
+
+		pub(crate) fn targets_hash() -> Option<T::Hash> {
+			PagedTargetSnapshotHash::<T>::get(Pallet::<T>::msp())
+		}
+	}
+
+	#[allow(unused)]
+	#[cfg(any(test, feature = "runtime-benchmarks", feature = "try-runtime"))]
+	impl<T: Config> Snapshot<T> {
+		/// Ensure the expected (non-)existence and hash-consistency of the snapshot.
+		///
+		/// If `exists`, the targets and the first `up_to_page` voter pages (counted from `msp`
+		/// downwards) must exist with matching hashes, and the remaining pages must not; inverted
+		/// if `!exists`. `up_to_page` is clamped to `T::Pages`.
+		pub(crate) fn ensure_snapshot(
+			exists: bool,
+			mut up_to_page: PageIndex,
+		) -> Result<(), &'static str> {
+			up_to_page = up_to_page.min(T::Pages::get());
+			// NOTE: if someday we split the snapshot taking of voters(msp) and targets into two
+			// different blocks, then this assertion becomes obsolete.
+			ensure!(up_to_page > 0, "can't check snapshot up to page 0");
+
+			// if any number of pages supposed to exist, these must also exist.
+			ensure!(exists ^ Self::desired_targets().is_none(), "desired target mismatch");
+			ensure!(exists ^ Self::targets().is_none(), "targets mismatch");
+			ensure!(exists ^ Self::targets_hash().is_none(), "targets hash mismatch");
+
+			// and the hash is correct.
+			if let Some(targets) = Self::targets() {
+				let hash = Self::targets_hash().expect("must exist; qed");
+				ensure!(hash == T::Hashing::hash(&targets.encode()), "targets hash mismatch");
+			}
+
+			// ensure that voter pages that should exist, indeed do exist..
+			let mut sum_existing_voters = 0;
+			for p in (crate::Pallet::<T>::lsp()..=crate::Pallet::<T>::msp())
+				.rev()
+				.take(up_to_page as usize)
+			{
+				ensure!(
+					(exists ^ Self::voters(p).is_none()) &&
+						(exists ^ Self::voters_hash(p).is_none()),
+					"voter page existence mismatch"
+				);
+
+				if let Some(voters_page) = Self::voters(p) {
+					sum_existing_voters = sum_existing_voters.saturating_add(voters_page.len());
+					let hash = Self::voters_hash(p).expect("must exist; qed");
+					ensure!(hash == T::Hashing::hash(&voters_page.encode()), "voter hash mismatch");
+				}
+			}
+
+			// ..and those that should not exist, indeed DON'T.
+			for p in (crate::Pallet::<T>::lsp()..=crate::Pallet::<T>::msp())
+				.take((T::Pages::get() - up_to_page) as usize)
+			{
+				ensure!(
+					(exists ^ Self::voters(p).is_some()) &&
+						(exists ^ Self::voters_hash(p).is_some()),
+					"voter page non-existence mismatch"
+				);
+			}
+
+			Ok(())
+		}
+
+		/// Ensure a *full* snapshot exists: desired targets set, targets hash present, and every
+		/// page (targets and all voter pages) filled to its configured per-block bound.
+		pub(crate) fn ensure_full_snapshot() -> Result<(), &'static str> {
+			// if any number of pages supposed to exist, these must also exist.
+			ensure!(Self::desired_targets().is_some(), "desired target mismatch");
+			ensure!(Self::targets_hash().is_some(), "targets hash mismatch");
+			ensure!(
+				Self::targets_decode_len().unwrap_or_default() as u32 ==
+					T::TargetSnapshotPerBlock::get(),
+				"targets decode length mismatch"
+			);
+
+			// ensure that voter pages that should exist, indeed do exist..
+			for p in crate::Pallet::<T>::lsp()..=crate::Pallet::<T>::msp() {
+				ensure!(
+					Self::voters_hash(p).is_some() &&
+						Self::voters_decode_len(p).unwrap_or_default() as u32 ==
+							T::VoterSnapshotPerBlock::get(),
+					"voter page existence mismatch"
+				);
+			}
+
+			Ok(())
+		}
+
+		/// Length of the stored voter snapshot page at `page`, without decoding the whole page.
+		pub(crate) fn voters_decode_len(page: PageIndex) -> Option<usize> {
+			PagedVoterSnapshot::<T>::decode_len(page)
+		}
+
+		/// Length of the stored target snapshot, without decoding it.
+		pub(crate) fn targets_decode_len() -> Option<usize> {
+			PagedTargetSnapshot::<T>::decode_len(Pallet::<T>::msp())
+		}
+
+		/// Fetch the hash of the stored voter snapshot page at `page`, if any.
+		pub(crate) fn voters_hash(page: PageIndex) -> Option<T::Hash> {
+			PagedVoterSnapshotHash::<T>::get(page)
+		}
+
+		/// Check that the snapshot state is consistent with the current phase.
+		pub(crate) fn sanity_check() -> Result<(), &'static str> {
+			// check the snapshot existence based on the phase. This checks all of the needed
+			// conditions except for the metadata values.
+			let _ = match Pallet::<T>::current_phase() {
+				// no page should exist in this phase.
+				Phase::Off => Self::ensure_snapshot(false, T::Pages::get()),
+				// exact number of pages must exist in this phase.
+				Phase::Snapshot(p) => Self::ensure_snapshot(true, T::Pages::get() - p),
+				// full snapshot must exist in these phases.
+				Phase::Emergency |
+				Phase::Signed |
+				Phase::SignedValidation(_) |
+				Phase::Export(_) |
+				Phase::Unsigned(_) => Self::ensure_snapshot(true, T::Pages::get()),
+				// cannot assume anything. We might halt at any point.
+				Phase::Halted => Ok(()),
+			}?;
+
+			Ok(())
+		}
+	}
+
+	#[cfg(test)]
+	impl<T: Config> Snapshot<T> {
+		/// Number of voter snapshot pages currently in storage.
+		pub(crate) fn voter_pages() -> PageIndex {
+			use sp_runtime::SaturatedConversion;
+			PagedVoterSnapshot::<T>::iter().count().saturated_into::<PageIndex>()
+		}
+
+		/// Number of target snapshot pages currently in storage (at most one is expected).
+		pub(crate) fn target_pages() -> PageIndex {
+			use sp_runtime::SaturatedConversion;
+			PagedTargetSnapshot::<T>::iter().count().saturated_into::<PageIndex>()
+		}
+
+		/// Iterate all voters across all pages, from `lsp` to `msp`, flattened into one stream.
+		pub(crate) fn voters_iter_flattened() -> impl Iterator<Item = VoterOf<T::MinerConfig>> {
+			let key_range =
+				(crate::Pallet::<T>::lsp()..=crate::Pallet::<T>::msp()).collect::<Vec<_>>();
+			key_range
+				.into_iter()
+				.flat_map(|k| PagedVoterSnapshot::<T>::get(k).unwrap_or_default())
+		}
+
+		/// Test-only: delete the voter snapshot page at `page`.
+		pub(crate) fn remove_voter_page(page: PageIndex) {
+			PagedVoterSnapshot::<T>::remove(page);
+		}
+
+		/// Test-only: delete the desired-targets value.
+		pub(crate) fn kill_desired_targets() {
+			DesiredTargets::<T>::kill();
+		}
+
+		/// Test-only: delete the (single) target snapshot page.
+		pub(crate) fn remove_target_page() {
+			PagedTargetSnapshot::<T>::remove(Pallet::<T>::msp());
+		}
+
+		/// Test-only: remove the target at index `at` and re-compute the stored targets hash.
+		///
+		/// Panics if no target snapshot exists, or if `at` is out of bounds.
+		pub(crate) fn remove_target(at: usize) {
+			PagedTargetSnapshot::<T>::mutate(crate::Pallet::<T>::msp(), |maybe_targets| {
+				if let Some(targets) = maybe_targets {
+					targets.remove(at);
+					// and update the hash.
+					PagedTargetSnapshotHash::<T>::insert(
+						crate::Pallet::<T>::msp(),
+						T::Hashing::hash(&targets.encode()),
+					)
+				} else {
+					unreachable!();
+				}
+			})
+		}
+	}
+
+	/// Desired number of targets to elect for this round.
+	#[pallet::storage]
+	type DesiredTargets<T> = StorageValue<_, u32>;
+	/// Paginated voter snapshot. At most [`T::Pages`] keys will exist.
+	#[pallet::storage]
+	type PagedVoterSnapshot<T: Config> =
+		StorageMap<_, Twox64Concat, PageIndex, VoterPageOf<T::MinerConfig>>;
+	/// Same as [`PagedVoterSnapshot`], but it will store the hash of the snapshot.
+	///
+	/// The hash is generated using [`frame_system::Config::Hashing`].
+	#[pallet::storage]
+	type PagedVoterSnapshotHash<T: Config> = StorageMap<_, Twox64Concat, PageIndex, T::Hash>;
+	/// Paginated target snapshot.
+	///
+	/// For the time being, since we assume one page of targets, at most ONE key will exist.
+	#[pallet::storage]
+	type PagedTargetSnapshot<T: Config> =
+		StorageMap<_, Twox64Concat, PageIndex, BoundedVec<T::AccountId, T::TargetSnapshotPerBlock>>;
+	/// Same as [`PagedTargetSnapshot`], but it will store the hash of the snapshot.
+	///
+	/// The hash is generated using [`frame_system::Config::Hashing`].
+	#[pallet::storage]
+	type PagedTargetSnapshotHash<T: Config> = StorageMap<_, Twox64Concat, PageIndex, T::Hash>;
+
+	#[pallet::pallet]
+	pub struct Pallet<T>(PhantomData<T>);
+}
+
+impl<T: Config> Pallet<T> {
+	/// Returns the most significant page of the snapshot.
+	///
+	/// Based on the contract of `ElectionDataProvider`, this is the first page that is filled.
+	fn msp() -> PageIndex {
+		T::Pages::get().checked_sub(1).defensive_unwrap_or_default()
+	}
+
+	/// Returns the least significant page of the snapshot.
+	///
+	/// Based on the contract of `ElectionDataProvider`, this is the last page that is filled.
+	fn lsp() -> PageIndex {
+		Zero::zero()
+	}
+
+	/// Transition the pallet to phase `to`, depositing a [`Event::PhaseTransitioned`] event only
+	/// if the phase *variant* actually changes (e.g. `Snapshot(1)` -> `Snapshot(0)` is silent).
+	pub(crate) fn phase_transition(to: Phase<BlockNumberFor<T>>) {
+		log!(debug, "transitioning phase from {:?} to {:?}", Self::current_phase(), to);
+		let from = Self::current_phase();
+		use sp_std::mem::discriminant;
+		if discriminant(&from) != discriminant(&to) {
+			Self::deposit_event(Event::PhaseTransitioned { from, to });
+		}
+		<CurrentPhase<T>>::put(to);
+	}
+
+	/// Perform all the basic checks that are independent of the snapshot. To be more specific,
+	/// these are all the checks that you can do without the need to read the massive blob of the
+	/// actual snapshot. This function only contains a handful of storage reads, with bounded size.
+	///
+	/// A sneaky detail is that this does check the `DesiredTargets` aspect of the snapshot, but
+	/// neither of the large storage items.
+	///
+	/// Moreover, we do optionally check the fingerprint of the snapshot, if provided.
+	///
+	/// These complement a feasibility-check, which is exactly the opposite: snapshot-dependent
+	/// checks.
+	pub(crate) fn snapshot_independent_checks(
+		paged_solution: &PagedRawSolution<T::MinerConfig>,
+		maybe_snapshot_fingerprint: Option<T::Hash>,
+	) -> Result<(), CommonError> {
+		// Note that the order of these checks is critical for the correctness and performance of
+		// `restore_or_compute_then_maybe_submit`. We want to make sure that we always check round
+		// first, so that if it has a wrong round, we can detect and delete it from the cache right
+		// from the get go.
+
+		// ensure round is current
+		ensure!(Self::round() == paged_solution.round, CommonError::WrongRound);
+
+		// ensure score is being improved, if the claim is even correct.
+		ensure!(
+			<T::Verifier as Verifier>::ensure_claimed_score_improves(paged_solution.score),
+			CommonError::WeakSubmission,
+		);
+
+		// ensure solution pages are no more than the snapshot
+		ensure!(
+			paged_solution.solution_pages.len().saturated_into::<PageIndex>() <= T::Pages::get(),
+			CommonError::WrongPageCount
+		);
+
+		// finally, check the winner count being correct.
+		if let Some(desired_targets) = Snapshot::<T>::desired_targets() {
+			ensure!(
+				desired_targets == paged_solution.winner_count_single_page_target_snapshot() as u32,
+				CommonError::WrongWinnerCount
+			)
+		}
+
+		// check the snapshot fingerprint, if asked for.
+		ensure!(
+			maybe_snapshot_fingerprint
+				.map_or(true, |snapshot_fingerprint| Snapshot::<T>::fingerprint() ==
+					snapshot_fingerprint),
+			CommonError::WrongFingerprint
+		);
+
+		Ok(())
+	}
+
+	/// Creates the target snapshot.
+	///
+	/// Also fetches and stores the desired target count from the data provider.
+	pub(crate) fn create_targets_snapshot() -> Result<(), ElectionError<T>> {
+		// if requested, get the targets as well.
+		Snapshot::<T>::set_desired_targets(
+			T::DataProvider::desired_targets().map_err(ElectionError::DataProvider)?,
+		);
+
+		let count = T::TargetSnapshotPerBlock::get();
+		let bounds = DataProviderBounds { count: Some(count.into()), size: None };
+		let targets: BoundedVec<_, T::TargetSnapshotPerBlock> =
+			T::DataProvider::electable_targets(bounds, 0)
+				.and_then(|v| v.try_into().map_err(|_| "try-into failed"))
+				.map_err(ElectionError::DataProvider)?;
+
+		let count = targets.len() as u32;
+		log!(debug, "created target snapshot with {} targets.", count);
+		Snapshot::<T>::set_targets(targets);
+
+		Ok(())
+	}
+
+	/// Creates the voter snapshot page for the given `remaining` page index.
+	pub(crate) fn create_voters_snapshot_paged(
+		remaining: PageIndex,
+	) -> Result<(), ElectionError<T>> {
+		let count = T::VoterSnapshotPerBlock::get();
+		let bounds = DataProviderBounds { count: Some(count.into()), size: None };
+		let voters: BoundedVec<_, T::VoterSnapshotPerBlock> =
+			T::DataProvider::electing_voters(bounds, remaining)
+				.and_then(|v| v.try_into().map_err(|_| "try-into failed"))
+				.map_err(ElectionError::DataProvider)?;
+
+		let count = voters.len() as u32;
+		Snapshot::<T>::set_voters(remaining, voters);
+		log!(debug, "created voter snapshot with {} voters, {} remaining.", count, remaining);
+
+		Ok(())
+	}
+
+	/// Perform the tasks to be done after a new `elect` has been triggered:
+	///
+	/// 1. Increment round.
+	/// 2. Change phase to [`Phase::Off`]
+	/// 3. Clear all snapshot data.
+	pub(crate) fn rotate_round() {
+		// Inc round.
+		<Round<T>>::mutate(|r| *r += 1);
+
+		// Phase is off now.
+		Self::phase_transition(Phase::Off);
+
+		// Kill everything in the verifier.
+		T::Verifier::kill();
+
+		// Kill the snapshot.
+		Snapshot::<T>::kill();
+	}
+
+	/// Call fallback for the given page.
+	///
+	/// This uses the [`ElectionProvider::bother`] to check if the fallback is actually going to do
+	/// anything. If so, it will re-collect the associated snapshot page and do the fallback. Else,
+	/// it will early return without touching the snapshot.
+	fn fallback_for_page(page: PageIndex) -> Result<BoundedSupportsOf<Self>, ElectionError<T>> {
+		use frame_election_provider_support::InstantElectionProvider;
+		let (voters, targets, desired_targets) = if T::Fallback::bother() {
+			(
+				Snapshot::<T>::voters(page).ok_or(ElectionError::Other("snapshot!"))?,
+				Snapshot::<T>::targets().ok_or(ElectionError::Other("snapshot!"))?,
+				Snapshot::<T>::desired_targets().ok_or(ElectionError::Other("snapshot!"))?,
+			)
+		} else {
+			(Default::default(), Default::default(), Default::default())
+		};
+		T::Fallback::instant_elect(voters.into_inner(), targets.into_inner(), desired_targets)
+			.map_err(ElectionError::Fallback)
+	}
+
+	/// Invariant checks for this pallet: delegates to the snapshot sanity check.
+	#[cfg(any(test, feature = "runtime-benchmarks", feature = "try-runtime"))]
+	pub(crate) fn do_try_state(_: BlockNumberFor<T>) -> Result<(), &'static str> {
+		Snapshot::<T>::sanity_check()
+	}
+}
+
+#[allow(unused)]
+#[cfg(any(feature = "runtime-benchmarks", test))]
+// helper code for testing and benchmarking
+impl<T> Pallet<T>
+where
+	T: Config + crate::signed::Config + crate::unsigned::Config + crate::verifier::Config,
+	BlockNumberFor<T>: From<u32>,
+{
+	/// A reasonable next election block number.
+	///
+	/// This should be passed into `T::DataProvider::set_next_election` in benchmarking.
+	pub(crate) fn reasonable_next_election() -> u32 {
+		let signed: u32 = T::SignedPhase::get().saturated_into();
+		let unsigned: u32 = T::UnsignedPhase::get().saturated_into();
+		let signed_validation: u32 = T::SignedValidationPhase::get().saturated_into();
+		(T::Pages::get() + signed + unsigned + signed_validation) * 2
+	}
+
+	/// Progress blocks until the criteria is met.
+	///
+	/// NOTE: always rolls at least one block before checking `criteria`; loops forever if the
+	/// criteria is never satisfied.
+	pub(crate) fn roll_until_matches(criteria: impl FnOnce() -> bool + Copy) {
+		loop {
+			Self::roll_next(true, false);
+			if criteria() {
+				break
+			}
+		}
+	}
+
+	/// Progress blocks until one block before the criteria is met.
+	///
+	/// Achieved by rolling each block inside a storage transaction and rolling back the one that
+	/// first satisfies `criteria`.
+	pub(crate) fn run_until_before_matches(criteria: impl FnOnce() -> bool + Copy) {
+		use frame_support::storage::TransactionOutcome;
+		loop {
+			let should_break = frame_support::storage::with_transaction(
+				|| -> TransactionOutcome<Result<_, DispatchError>> {
+					Pallet::<T>::roll_next(true, false);
+					if criteria() {
+						TransactionOutcome::Rollback(Ok(true))
+					} else {
+						TransactionOutcome::Commit(Ok(false))
+					}
+				},
+			)
+			.unwrap();
+
+			if should_break {
+				break
+			}
+		}
+	}
+
+	/// Roll to the signed phase and mine a full, paged solution via the offchain miner.
+	pub(crate) fn roll_to_signed_and_mine_full_solution() -> PagedRawSolution<T::MinerConfig> {
+		use unsigned::miner::OffchainWorkerMiner;
+		Self::roll_until_matches(|| Self::current_phase() == Phase::Signed);
+		// ensure snapshot is full.
+		crate::Snapshot::<T>::ensure_full_snapshot().expect("Snapshot is not full");
+		OffchainWorkerMiner::<T>::mine_solution(T::Pages::get(), false).unwrap()
+	}
+
+	/// Register a funded "alice" account and submit all pages of the given solution.
+	pub(crate) fn submit_full_solution(
+		PagedRawSolution { score, solution_pages, .. }: PagedRawSolution<T::MinerConfig>,
+	) {
+		use frame_system::RawOrigin;
+		use sp_std::boxed::Box;
+		use types::Pagify;
+
+		// register alice
+		let alice = crate::Pallet::<T>::funded_account("alice", 0);
+		signed::Pallet::<T>::register(RawOrigin::Signed(alice.clone()).into(), score).unwrap();
+
+		// submit pages
+		solution_pages
+			.pagify(T::Pages::get())
+			.map(|(index, page)| {
+				signed::Pallet::<T>::submit_page(
+					RawOrigin::Signed(alice.clone()).into(),
+					index,
+					Some(Box::new(page.clone())),
+				)
+			})
+			.collect::<Result<Vec<_>, _>>()
+			.unwrap();
+	}
+
+	/// Convenience: mine a full solution and submit it (see the two helpers above).
+	pub(crate) fn roll_to_signed_and_submit_full_solution() {
+		Self::submit_full_solution(Self::roll_to_signed_and_mine_full_solution());
+	}
+
+	/// Create a benchmarking account from `seed`/`index`, whitelist it, and fund it with
+	/// 10000x the minimum balance.
+	fn funded_account(seed: &'static str, index: u32) -> T::AccountId {
+		use frame_benchmarking::whitelist;
+		use frame_support::traits::fungible::{Inspect, Mutate};
+		let who: T::AccountId = frame_benchmarking::account(seed, index, 777);
+		whitelist!(who);
+		let balance = T::Currency::minimum_balance() * 10000u32.into();
+		T::Currency::mint_into(&who, balance).unwrap();
+		who
+	}
+
+	/// Roll all pallets forward, for the given number of blocks.
+	///
+	/// Runs `on_initialize` of this pallet, the verifier, the unsigned pallet and (optionally)
+	/// the signed pallet — in that order — for each block, optionally checking all pallets'
+	/// try-state invariants at the end of each block.
+	pub(crate) fn roll_to(n: BlockNumberFor<T>, with_signed: bool, try_state: bool) {
+		let now = frame_system::Pallet::<T>::block_number();
+		assert!(n > now, "cannot roll to current or past block");
+		let one: BlockNumberFor<T> = 1u32.into();
+		let mut i = now + one;
+		while i <= n {
+			frame_system::Pallet::<T>::set_block_number(i);
+
+			Pallet::<T>::on_initialize(i);
+			verifier::Pallet::<T>::on_initialize(i);
+			unsigned::Pallet::<T>::on_initialize(i);
+
+			if with_signed {
+				signed::Pallet::<T>::on_initialize(i);
+			}
+
+			// invariants must hold at the end of each block.
+			if try_state {
+				Pallet::<T>::do_try_state(i).unwrap();
+				verifier::Pallet::<T>::do_try_state(i).unwrap();
+				unsigned::Pallet::<T>::do_try_state(i).unwrap();
+				signed::Pallet::<T>::do_try_state(i).unwrap();
+			}
+
+			i += one;
+		}
+	}
+
+	/// Roll to next block.
+	pub(crate) fn roll_next(with_signed: bool, try_state: bool) {
+		Self::roll_to(
+			frame_system::Pallet::<T>::block_number() + 1u32.into(),
+			with_signed,
+			try_state,
+		);
+	}
+}
+
+impl<T: Config> ElectionProvider for Pallet<T> {
+	type AccountId = T::AccountId;
+	type BlockNumber = BlockNumberFor<T>;
+	type Error = ElectionError<T>;
+	type DataProvider = T::DataProvider;
+	type Pages = T::Pages;
+	type MaxWinnersPerPage = <T::Verifier as Verifier>::MaxWinnersPerPage;
+	type MaxBackersPerWinner = <T::Verifier as Verifier>::MaxBackersPerWinner;
+
+	/// Fetch the queued solution page for `remaining`, falling back to [`Self::fallback_for_page`]
+	/// if none is queued. On the last page (`remaining == 0`) the round is rotated; otherwise we
+	/// transition into [`Phase::Export`]. If the fallback put us into the emergency phase, the
+	/// pallet halts instead.
+	fn elect(remaining: PageIndex) -> Result<BoundedSupportsOf<Self>, Self::Error> {
+		if !Self::ongoing() {
+			return Err(ElectionError::NotOngoing);
+		}
+
+		let result = T::Verifier::get_queued_solution_page(remaining)
+			.ok_or(ElectionError::SupportPageNotAvailable)
+			.or_else(|err: ElectionError<T>| {
+				log!(
+					warn,
+					"primary election for page {} failed due to: {:?}, trying fallback",
+					remaining,
+					err,
+				);
+				Self::fallback_for_page(remaining)
+			})
+			.map_err(|err| {
+				// if any pages returns an error, we go into the emergency phase and don't do
+				// anything else anymore. This will prevent any new submissions to signed and
+				// unsigned pallet, and thus the verifier will also be almost stuck, except for the
+				// submission of emergency solutions.
+				log!(warn, "primary and fallback ({:?}) failed for page {:?}", err, remaining);
+				err
+			})
+			.map(|supports| {
+				// convert to bounded
+				supports.into()
+			});
+
+		// if fallback has possibly put us into the emergency phase, don't do anything else.
+		if CurrentPhase::<T>::get().is_emergency() && result.is_err() {
+			log!(error, "Emergency phase triggered, halting the election.");
+		} else if remaining.is_zero() {
+			log!(info, "receiving last call to elect(0), rotating round");
+			Self::rotate_round()
+		} else {
+			Self::phase_transition(Phase::Export(remaining))
+		}
+
+		result
+	}
+
+	/// An election is ongoing in every phase except `Off` and `Halted`.
+	fn ongoing() -> bool {
+		match <CurrentPhase<T>>::get() {
+			Phase::Off | Phase::Halted => false,
+			Phase::Signed |
+			Phase::SignedValidation(_) |
+			Phase::Unsigned(_) |
+			Phase::Snapshot(_) |
+			Phase::Emergency |
+			Phase::Export(_) => true,
+		}
+	}
+}
+
+#[cfg(test)]
+mod phase_rotation {
+	use super::{Event, *};
+	use crate::{mock::*, Phase};
+	use frame_election_provider_support::ElectionProvider;
+	use frame_support::traits::Hooks;
+
+	// End-to-end phase rotation with a single snapshot page: Off -> Snapshot -> Signed ->
+	// SignedValidation -> Unsigned -> elect(0), then the same sequence again in round 1.
+	#[test]
+	fn single_page() {
+		ExtBuilder::full()
+			.pages(1)
+			.fallback_mode(FallbackModes::Onchain)
+			.build_and_execute(|| {
+				// 0 -------- 14 15 --------- 20 ------------- 25 ---------- 30
+				//            |  |            |                |             |
+				//    Snapshot Signed  SignedValidation    Unsigned       elect()
+
+				assert_eq!(System::block_number(), 0);
+				assert_eq!(MultiBlock::current_phase(), Phase::Off);
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(false, 1));
+				assert_eq!(MultiBlock::round(), 0);
+
+				roll_to(4);
+				assert_eq!(MultiBlock::current_phase(), Phase::Off);
+				assert_eq!(MultiBlock::round(), 0);
+
+				roll_to(13);
+				assert_eq!(MultiBlock::current_phase(), Phase::Off);
+
+				roll_to(14);
+				assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(0));
+
+				roll_to(15);
+				assert_eq!(MultiBlock::current_phase(), Phase::Signed);
+				assert_eq!(
+					multi_block_events(),
+					vec![
+						Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(0) },
+						Event::PhaseTransitioned { from: Phase::Snapshot(0), to: Phase::Signed }
+					]
+				);
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(true, 1));
+				assert_eq!(MultiBlock::round(), 0);
+
+				roll_to(19);
+				assert_eq!(MultiBlock::current_phase(), Phase::Signed);
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(true, 1));
+				assert_eq!(MultiBlock::round(), 0);
+
+				roll_to(20);
+				assert_eq!(MultiBlock::current_phase(), Phase::SignedValidation(20));
+				assert_eq!(
+					multi_block_events(),
+					vec![
+						Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(0) },
+						Event::PhaseTransitioned { from: Phase::Snapshot(0), to: Phase::Signed },
+						Event::PhaseTransitioned {
+							from: Phase::Signed,
+							to: Phase::SignedValidation(20)
+						}
+					],
+				);
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(true, 1));
+
+				roll_to(24);
+				assert_eq!(MultiBlock::current_phase(), Phase::SignedValidation(20));
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(true, 1));
+				assert_eq!(MultiBlock::round(), 0);
+
+				roll_to(25);
+				assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(25));
+				assert_eq!(
+					multi_block_events(),
+					vec![
+						Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(0) },
+						Event::PhaseTransitioned { from: Phase::Snapshot(0), to: Phase::Signed },
+						Event::PhaseTransitioned {
+							from: Phase::Signed,
+							to: Phase::SignedValidation(20)
+						},
+						Event::PhaseTransitioned {
+							from: Phase::SignedValidation(20),
+							to: Phase::Unsigned(25)
+						}
+					],
+				);
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(true, 1));
+
+				roll_to(30);
+				assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(25));
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(true, 1));
+
+				// We close when upstream tells us to elect.
+				roll_to(32);
+				assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(25));
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(true, 1));
+
+				MultiBlock::elect(0).unwrap();
+
+				// elect(0) rotates the round: phase off, snapshot wiped, round incremented.
+				assert!(MultiBlock::current_phase().is_off());
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(false, 1));
+				assert_eq!(MultiBlock::round(), 1);
+
+				roll_to(43);
+				assert_eq!(MultiBlock::current_phase(), Phase::Off);
+
+				roll_to(44);
+				assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(0));
+
+				roll_to(45);
+				assert!(MultiBlock::current_phase().is_signed());
+
+				roll_to(50);
+				assert!(MultiBlock::current_phase().is_signed_validation_open_at(50));
+
+				roll_to(55);
+				assert!(MultiBlock::current_phase().is_unsigned_open_at(55));
+			})
+	}
+
+	// Same phase-rotation walk-through as `single_page`, but with two snapshot pages: the
+	// snapshot phase spans two blocks (Snapshot(1) then Snapshot(0)).
+	#[test]
+	fn multi_page_2() {
+		ExtBuilder::full()
+			.pages(2)
+			.fallback_mode(FallbackModes::Onchain)
+			.build_and_execute(|| {
+				// 0 -------13 14 15 ------- 20 ---- 25 ------- 30
+				//           |     |         |       |          |
+				//    Snapshot    Signed SigValid  Unsigned   Elect
+
+				assert_eq!(System::block_number(), 0);
+				assert_eq!(MultiBlock::current_phase(), Phase::Off);
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(false, 2));
+				assert_eq!(MultiBlock::round(), 0);
+
+				roll_to(4);
+				assert_eq!(MultiBlock::current_phase(), Phase::Off);
+				assert_eq!(MultiBlock::round(), 0);
+
+				roll_to(12);
+				assert_eq!(MultiBlock::current_phase(), Phase::Off);
+
+				roll_to(13);
+				assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(1));
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(true, 1));
+
+				roll_to(14);
+				assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(0));
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(true, 2));
+
+				roll_to(15);
+				assert_eq!(MultiBlock::current_phase(), Phase::Signed);
+				// NOTE: only variant changes emit events, hence no Snapshot(1) -> Snapshot(0).
+				assert_eq!(
+					multi_block_events(),
+					vec![
+						Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(1) },
+						Event::PhaseTransitioned { from: Phase::Snapshot(0), to: Phase::Signed }
+					]
+				);
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(true, 2));
+				assert_eq!(MultiBlock::round(), 0);
+
+				roll_to(19);
+				assert_eq!(MultiBlock::current_phase(), Phase::Signed);
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(true, 2));
+				assert_eq!(MultiBlock::round(), 0);
+
+				roll_to(20);
+				assert_eq!(MultiBlock::current_phase(), Phase::SignedValidation(20));
+				assert_eq!(
+					multi_block_events(),
+					vec![
+						Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(1) },
+						Event::PhaseTransitioned { from: Phase::Snapshot(0), to: Phase::Signed },
+						Event::PhaseTransitioned {
+							from: Phase::Signed,
+							to: Phase::SignedValidation(20)
+						}
+					],
+				);
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(true, 2));
+
+				roll_to(24);
+				assert_eq!(MultiBlock::current_phase(), Phase::SignedValidation(20));
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(true, 2));
+				assert_eq!(MultiBlock::round(), 0);
+
+				roll_to(25);
+				assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(25));
+				assert_eq!(
+					multi_block_events(),
+					vec![
+						Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(1) },
+						Event::PhaseTransitioned { from: Phase::Snapshot(0), to: Phase::Signed },
+						Event::PhaseTransitioned {
+							from: Phase::Signed,
+							to: Phase::SignedValidation(20)
+						},
+						Event::PhaseTransitioned {
+							from: Phase::SignedValidation(20),
+							to: Phase::Unsigned(25)
+						}
+					],
+				);
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(true, 2));
+
+				roll_to(29);
+				assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(25));
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(true, 2));
+
+				roll_to(30);
+				assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(25));
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(true, 2));
+
+				// We close when upstream tells us to elect.
+				roll_to(32);
+				assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(25));
+
+				MultiBlock::elect(0).unwrap(); // and even this one's coming from the fallback.
+				assert!(MultiBlock::current_phase().is_off());
+
+				// all snapshots are gone.
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(false, 2));
+				assert_eq!(MultiBlock::round(), 1);
+
+				roll_to(42);
+				assert_eq!(MultiBlock::current_phase(), Phase::Off);
+
+				roll_to(43);
+				assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(1));
+
+				roll_to(44);
+				assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(0));
+
+				roll_to(45);
+				assert!(MultiBlock::current_phase().is_signed());
+
+				roll_to(50);
+				assert!(MultiBlock::current_phase().is_signed_validation_open_at(50));
+
+				roll_to(55);
+				assert!(MultiBlock::current_phase().is_unsigned_open_at(55));
+			})
+	}
+
+	// Same phase-rotation walk-through, with three snapshot pages: the snapshot phase spans
+	// three blocks (Snapshot(2), Snapshot(1), Snapshot(0)).
+	#[test]
+	fn multi_page_3() {
+		ExtBuilder::full()
+			.pages(3)
+			.fallback_mode(FallbackModes::Onchain)
+			.build_and_execute(|| {
+				// 0 ------- 12 13 14 15 ----------- 20 ---------25 ------- 30
+				//            |       |              |            |          |
+				//     Snapshot      Signed   SignedValidation  Unsigned   Elect
+
+				assert_eq!(System::block_number(), 0);
+				assert_eq!(MultiBlock::current_phase(), Phase::Off);
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(false, 3));
+				assert_eq!(MultiBlock::round(), 0);
+
+				roll_to(4);
+				assert_eq!(MultiBlock::current_phase(), Phase::Off);
+				assert_eq!(MultiBlock::round(), 0);
+
+				roll_to(11);
+				assert_eq!(MultiBlock::current_phase(), Phase::Off);
+
+				roll_to(12);
+				assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(2));
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(true, 1));
+
+				roll_to(13);
+				assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(1));
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(true, 2));
+
+				roll_to(14);
+				assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(0));
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(true, 3));
+
+				roll_to(15);
+				assert_eq!(MultiBlock::current_phase(), Phase::Signed);
+				assert_eq!(
+					multi_block_events(),
+					vec![
+						Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(2) },
+						Event::PhaseTransitioned { from: Phase::Snapshot(0), to: Phase::Signed }
+					]
+				);
+				assert_eq!(MultiBlock::round(), 0);
+
+				roll_to(19);
+				assert_eq!(MultiBlock::current_phase(), Phase::Signed);
+				assert_eq!(MultiBlock::round(), 0);
+
+				roll_to(20);
+				assert_eq!(MultiBlock::current_phase(), Phase::SignedValidation(20));
+				assert_eq!(
+					multi_block_events(),
+					vec![
+						Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(2) },
+						Event::PhaseTransitioned { from: Phase::Snapshot(0), to: Phase::Signed },
+						Event::PhaseTransitioned {
+							from: Phase::Signed,
+							to: Phase::SignedValidation(20)
+						}
+					]
+				);
+
+				roll_to(24);
+				assert_eq!(MultiBlock::current_phase(), Phase::SignedValidation(20));
+				assert_eq!(MultiBlock::round(), 0);
+
+				roll_to(25);
+				assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(25));
+				assert_eq!(
+					multi_block_events(),
+					vec![
+						Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(2) },
+						Event::PhaseTransitioned { from: Phase::Snapshot(0), to: Phase::Signed },
+						Event::PhaseTransitioned {
+							from: Phase::Signed,
+							to: Phase::SignedValidation(20)
+						},
+						Event::PhaseTransitioned {
+							from: Phase::SignedValidation(20),
+							to: Phase::Unsigned(25)
+						}
+					]
+				);
+
+				roll_to(29);
+				assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(25));
+
+				roll_to(30);
+				assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(25));
+
+				// We close when upstream tells us to elect.
+				roll_to(32);
+				assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(25));
+
+				MultiBlock::elect(0).unwrap();
+				assert!(MultiBlock::current_phase().is_off());
+
+				// all snapshots are gone.
+				assert_none_snapshot();
+				assert_eq!(MultiBlock::round(), 1);
+
+				roll_to(41);
+				assert_eq!(MultiBlock::current_phase(), Phase::Off);
+
+				roll_to(42);
+				assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(2));
+
+				roll_to(43);
+				assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(1));
+
+				roll_to(44);
+				assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(0));
+
+				roll_to(45);
+				assert!(MultiBlock::current_phase().is_signed());
+
+				roll_to(50);
+				assert!(MultiBlock::current_phase().is_signed_validation_open_at(50));
+
+				roll_to(55);
+				assert!(MultiBlock::current_phase().is_unsigned_open_at(55));
+			})
+	}
+
+	// Full multi-page lifecycle with `lookahead = 2`: every phase (snapshot, signed, signed
+	// validation, unsigned) begins two blocks earlier than in the default schedule, and the
+	// next round's schedule is shifted left by the same two blocks.
+	#[test]
+	fn multi_with_lookahead() {
+		ExtBuilder::full()
+			.pages(3)
+			.lookahead(2)
+			.fallback_mode(FallbackModes::Onchain)
+			.build_and_execute(|| {
+				// 0 ------- 10 11 12 13 ----------- 17 ---------22 ------- 27
+				//            |       |              |            |          |
+				//     Snapshot      Signed   SignedValidation  Unsigned   Elect
+				// NOTE(review): the numeric markers appear to denote the LAST block of each
+				// phase (signed 13..=17, validation 18..=22, unsigned 23..=27), since the
+				// assertions below show validation starting at 18 and unsigned at 23 — confirm.
+
+				assert_eq!(System::block_number(), 0);
+				assert_eq!(MultiBlock::current_phase(), Phase::Off);
+				assert_none_snapshot();
+				assert_eq!(MultiBlock::round(), 0);
+
+				roll_to(4);
+				assert_eq!(MultiBlock::current_phase(), Phase::Off);
+				assert_eq!(MultiBlock::round(), 0);
+
+				roll_to(9);
+				assert_eq!(MultiBlock::current_phase(), Phase::Off);
+
+				// snapshot phase starts 2 blocks earlier than the default schedule.
+				roll_to(10);
+				assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(2));
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(true, 1));
+
+				roll_to(11);
+				assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(1));
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(true, 2));
+
+				roll_to(12);
+				assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(0));
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(true, 3));
+
+				roll_to(13);
+				assert_eq!(MultiBlock::current_phase(), Phase::Signed);
+				assert_eq!(
+					multi_block_events(),
+					vec![
+						Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(2) },
+						Event::PhaseTransitioned { from: Phase::Snapshot(0), to: Phase::Signed }
+					]
+				);
+				assert_eq!(MultiBlock::round(), 0);
+
+				roll_to(17);
+				assert_eq!(MultiBlock::current_phase(), Phase::Signed);
+				assert_full_snapshot();
+				assert_eq!(MultiBlock::round(), 0);
+
+				roll_to(18);
+				assert_eq!(MultiBlock::current_phase(), Phase::SignedValidation(18));
+				assert_eq!(
+					multi_block_events(),
+					vec![
+						Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(2) },
+						Event::PhaseTransitioned { from: Phase::Snapshot(0), to: Phase::Signed },
+						Event::PhaseTransitioned {
+							from: Phase::Signed,
+							to: Phase::SignedValidation(18)
+						}
+					]
+				);
+
+				roll_to(22);
+				assert_eq!(MultiBlock::current_phase(), Phase::SignedValidation(18));
+				assert_full_snapshot();
+				assert_eq!(MultiBlock::round(), 0);
+
+				roll_to(23);
+				assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(23));
+				assert_eq!(
+					multi_block_events(),
+					vec![
+						Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(2) },
+						Event::PhaseTransitioned { from: Phase::Snapshot(0), to: Phase::Signed },
+						Event::PhaseTransitioned {
+							from: Phase::Signed,
+							to: Phase::SignedValidation(18)
+						},
+						Event::PhaseTransitioned {
+							from: Phase::SignedValidation(18),
+							to: Phase::Unsigned(23)
+						}
+					]
+				);
+
+				roll_to(27);
+				assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(23));
+
+				roll_to(28);
+				assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(23));
+
+				// We close when upstream tells us to elect.
+				roll_to(30);
+				assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(23));
+
+				MultiBlock::elect(0).unwrap();
+				assert!(MultiBlock::current_phase().is_off());
+
+				// all snapshots are gone.
+				assert_ok!(Snapshot::<Runtime>::ensure_snapshot(false, 3));
+				assert_eq!(MultiBlock::round(), 1);
+
+				// round 1: same schedule as the non-lookahead case, shifted left by 2 blocks.
+				roll_to(41 - 2);
+				assert_eq!(MultiBlock::current_phase(), Phase::Off);
+
+				roll_to(42 - 2);
+				assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(2));
+
+				roll_to(43 - 2);
+				assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(1));
+
+				roll_to(44 - 2);
+				assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(0));
+
+				roll_to(45 - 2);
+				assert!(MultiBlock::current_phase().is_signed());
+
+				roll_to(50 - 2);
+				assert!(MultiBlock::current_phase().is_signed_validation_open_at(50 - 2));
+
+				roll_to(55 - 2);
+				assert!(MultiBlock::current_phase().is_unsigned_open_at(55 - 2));
+			})
+	}
+
+	// With `unsigned_phase = 0` the unsigned phase is skipped entirely: the pallet goes
+	// Snapshot -> Signed -> SignedValidation and then stays in signed-validation until a call
+	// to `elect` closes the round.
+	#[test]
+	fn no_unsigned_phase() {
+		ExtBuilder::full()
+			.pages(3)
+			.unsigned_phase(0)
+			.fallback_mode(FallbackModes::Onchain)
+			.build_and_execute(|| {
+				// 0 --------------------- 17 ------ 20 ---------25 ------- 30
+				//            |            |         |            |          |
+				//                     Snapshot    Signed  SignedValidation   Elect
+
+				assert_eq!(System::block_number(), 0);
+				assert_eq!(MultiBlock::current_phase(), Phase::Off);
+				assert_none_snapshot();
+				assert_eq!(MultiBlock::round(), 0);
+
+				roll_to(4);
+				assert_eq!(MultiBlock::current_phase(), Phase::Off);
+				assert_eq!(MultiBlock::round(), 0);
+
+				roll_to(17);
+				assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(2));
+				roll_to(18);
+				assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(1));
+				roll_to(19);
+				assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(0));
+
+				assert_full_snapshot();
+				assert_eq!(MultiBlock::round(), 0);
+
+				roll_to(20);
+				assert_eq!(MultiBlock::current_phase(), Phase::Signed);
+				roll_to(25);
+				assert_eq!(MultiBlock::current_phase(), Phase::SignedValidation(25));
+
+				assert_eq!(
+					multi_block_events(),
+					vec![
+						Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(2) },
+						Event::PhaseTransitioned { from: Phase::Snapshot(0), to: Phase::Signed },
+						Event::PhaseTransitioned {
+							from: Phase::Signed,
+							to: Phase::SignedValidation(25)
+						},
+					]
+				);
+
+				// Signed validation can now be expanded until a call to `elect` comes
+				roll_to(27);
+				assert_eq!(MultiBlock::current_phase(), Phase::SignedValidation(25));
+				roll_to(32);
+				assert_eq!(MultiBlock::current_phase(), Phase::SignedValidation(25));
+
+				MultiBlock::elect(0).unwrap();
+				assert!(MultiBlock::current_phase().is_off());
+
+				// all snapshots are gone, and both the signed and verifier pallets are clean.
+				assert_none_snapshot();
+				assert_eq!(MultiBlock::round(), 1);
+				assert_ok!(signed::Submissions::<Runtime>::ensure_killed(0));
+				verifier::QueuedSolution::<Runtime>::assert_killed();
+			})
+	}
+
+	// With `signed_phase = 0` both the signed and signed-validation phases are skipped: the
+	// snapshot transitions straight into `Unsigned`, which is then held open until `elect`.
+	#[test]
+	fn no_signed_phase() {
+		ExtBuilder::full()
+			.pages(3)
+			.signed_phase(0, 0)
+			.fallback_mode(FallbackModes::Onchain)
+			.build_and_execute(|| {
+				// 0 ------------------------- 22 ------ 25 ------- 30
+				//                             |         |          |
+				//                         Snapshot   Unsigned   Elect
+
+				assert_eq!(System::block_number(), 0);
+				assert_eq!(MultiBlock::current_phase(), Phase::Off);
+				assert_none_snapshot();
+				assert_eq!(MultiBlock::round(), 0);
+
+				roll_to(20);
+				assert_eq!(MultiBlock::current_phase(), Phase::Off);
+				assert_eq!(MultiBlock::round(), 0);
+
+				roll_to(22);
+				assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(2));
+				roll_to(23);
+				assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(1));
+				roll_to(24);
+				assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(0));
+
+				assert_full_snapshot();
+				assert_eq!(MultiBlock::round(), 0);
+
+				// straight into unsigned: no Signed/SignedValidation transitions are emitted.
+				roll_to(25);
+				assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(25));
+				assert_eq!(
+					multi_block_events(),
+					vec![
+						Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(2) },
+						Event::PhaseTransitioned {
+							from: Phase::Snapshot(0),
+							to: Phase::Unsigned(25)
+						},
+					]
+				);
+
+				// Unsigned can now be expanded until a call to `elect` comes
+				roll_to(27);
+				assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(25));
+				roll_to(32);
+				assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(25));
+
+				MultiBlock::elect(0).unwrap();
+				assert!(MultiBlock::current_phase().is_off());
+
+				// all snapshots are gone, and both the signed and verifier pallets are clean.
+				assert_none_snapshot();
+				assert_eq!(MultiBlock::round(), 1);
+				assert_ok!(signed::Submissions::<Runtime>::ensure_killed(0));
+				verifier::QueuedSolution::<Runtime>::assert_killed();
+			})
+	}
+
+	// Placeholder for the configuration where both signed and unsigned phases are zero.
+	// `todo!()` panics, which satisfies `should_panic` until the test is written.
+	#[test]
+	#[should_panic]
+	fn no_any_phase() {
+		todo!()
+	}
+
+	// `integrity_test` must reject a signed-validation phase (2 blocks here) that is shorter
+	// than the number of pages (3), as per the expected panic message below.
+	#[test]
+	#[should_panic(
+		expected = "signed validation phase should be at least as long as the number of pages"
+	)]
+	fn incorrect_signed_validation_phase() {
+		ExtBuilder::full()
+			.pages(3)
+			.signed_validation_phase(2)
+			.build_and_execute(|| <MultiBlock as Hooks<BlockNumber>>::integrity_test())
+	}
+}
+
+#[cfg(test)]
+mod election_provider {
+	use super::*;
+	use crate::{mock::*, unsigned::miner::OffchainWorkerMiner, verifier::Verifier, Phase};
+	use frame_election_provider_support::{BoundedSupport, BoundedSupports, ElectionProvider};
+	use frame_support::{
+		assert_storage_noop, testing_prelude::bounded_vec, unsigned::ValidateUnsigned,
+	};
+
+	// This is probably the most important test of all, a basic, correct scenario. This test should
+	// be studied in detail, and all of the branches of how it can go wrong or diverge from the
+	// basic scenario assessed.
+	#[test]
+	fn multi_page_elect_simple_works() {
+		ExtBuilder::full().build_and_execute(|| {
+			roll_to_signed_open();
+			assert_eq!(MultiBlock::current_phase(), Phase::Signed);
+
+			// load a solution into the verifier
+			let paged = OffchainWorkerMiner::<Runtime>::mine_solution(Pages::get(), false).unwrap();
+			let score = paged.score;
+
+			// now let's submit this one by one, into the signed phase.
+			load_signed_for_verification(99, paged);
+
+			// now the solution should start being verified.
+			roll_to_signed_validation_open();
+
+			assert_eq!(
+				multi_block_events(),
+				vec![
+					Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(2) },
+					Event::PhaseTransitioned { from: Phase::Snapshot(0), to: Phase::Signed },
+					Event::PhaseTransitioned {
+						from: Phase::Signed,
+						to: Phase::SignedValidation(20)
+					}
+				]
+			);
+			assert_eq!(verifier_events(), vec![]);
+
+			// there is no queued solution prior to the last page of the solution getting verified
+			assert_eq!(<Runtime as crate::Config>::Verifier::queued_score(), None);
+
+			// proceed until it is fully verified: one page is verified per block, msp first.
+			roll_next();
+			assert_eq!(verifier_events(), vec![verifier::Event::Verified(2, 2)]);
+
+			roll_next();
+			assert_eq!(
+				verifier_events(),
+				vec![verifier::Event::Verified(2, 2), verifier::Event::Verified(1, 2)]
+			);
+
+			roll_next();
+			assert_eq!(
+				verifier_events(),
+				vec![
+					verifier::Event::Verified(2, 2),
+					verifier::Event::Verified(1, 2),
+					verifier::Event::Verified(0, 2),
+					verifier::Event::Queued(score, None),
+				]
+			);
+
+			// there is now a queued solution.
+			assert_eq!(<Runtime as crate::Config>::Verifier::queued_score(), Some(score));
+
+			// now let's go to unsigned phase, but we don't expect anything to happen there since we
+			// don't run OCWs.
+			roll_to_unsigned_open();
+
+			// pre-elect state
+			assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(25));
+			assert_eq!(MultiBlock::round(), 0);
+			assert_full_snapshot();
+
+			// call elect for each page
+			// NOTE(review): `lsp()..msp()` is a half-open range, so with 3 pages (lsp = 0,
+			// msp = 2) this iterates 1, 0 only — page 2 (msp) is never explicitly fetched,
+			// contrary to the "2, 1, 0" annotation. If all pages are meant to be fetched one
+			// by one, this should be `lsp()..=msp()` — TODO confirm intent.
+			let _paged_solution = (MultiBlock::lsp()..MultiBlock::msp())
+				.rev() // NOTE(review): yields 1, 0 (see note above)
+				.map(|page| {
+					MultiBlock::elect(page as PageIndex).unwrap();
+					if page == 0 {
+						assert!(MultiBlock::current_phase().is_off())
+					} else {
+						assert!(MultiBlock::current_phase().is_export())
+					}
+				})
+				.collect::<Vec<_>>();
+
+			// after the last elect, verifier is cleared,
+			verifier::QueuedSolution::<Runtime>::assert_killed();
+			// the phase is off,
+			assert_eq!(MultiBlock::current_phase(), Phase::Off);
+			// the round is incremented,
+			assert_eq!(Round::<Runtime>::get(), 1);
+			// and the snapshot is cleared,
+			assert_storage_noop!(Snapshot::<Runtime>::kill());
+			// signed pallet is clean.
+			// NOTE: in the future, if and when we add lazy cleanup to the signed pallet, this
+			// assertion might break.
+			assert_ok!(signed::Submissions::<Runtime>::ensure_killed(0));
+		});
+	}
+
+	// A fully verified signed solution exists, but the caller short-circuits by calling
+	// `elect(0)` directly: the remaining pages are fast-tracked and the round is closed in one
+	// call.
+	#[test]
+	fn multi_page_elect_fast_track() {
+		ExtBuilder::full().build_and_execute(|| {
+			roll_to_signed_open();
+			let round = MultiBlock::round();
+			assert_eq!(MultiBlock::current_phase(), Phase::Signed);
+
+			// load a solution into the verifier
+			let paged = OffchainWorkerMiner::<Runtime>::mine_solution(Pages::get(), false).unwrap();
+			let score = paged.score;
+			load_signed_for_verification_and_start(99, paged, 0);
+
+			// there is no queued solution prior to the last page of the solution getting verified
+			assert_eq!(<Runtime as crate::Config>::Verifier::queued_score(), None);
+
+			// roll to the block it is finalized: one page is verified per block.
+			roll_next();
+			roll_next();
+			roll_next();
+			assert_eq!(
+				verifier_events(),
+				vec![
+					verifier::Event::Verified(2, 2),
+					verifier::Event::Verified(1, 2),
+					verifier::Event::Verified(0, 2),
+					verifier::Event::Queued(score, None),
+				]
+			);
+
+			// there is now a queued solution.
+			assert_eq!(<Runtime as crate::Config>::Verifier::queued_score(), Some(score));
+
+			// not much impact, just for the sane-ness of the test.
+			roll_to_unsigned_open();
+
+			// pre-elect state:
+			assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(25));
+			assert_eq!(Round::<Runtime>::get(), 0);
+			assert_full_snapshot();
+
+			// there are 3 pages (indexes 2..=0), but we short circuit by just calling 0.
+			let _solution = crate::Pallet::<Runtime>::elect(0).unwrap();
+
+			// round is incremented.
+			assert_eq!(MultiBlock::round(), round + 1);
+			// after elect(0) is called, verifier is cleared,
+			verifier::QueuedSolution::<Runtime>::assert_killed();
+			// the phase is off,
+			assert_eq!(MultiBlock::current_phase(), Phase::Off);
+			// the round is incremented,
+			assert_eq!(Round::<Runtime>::get(), 1);
+			// the snapshot is cleared,
+			assert_none_snapshot();
+			// and signed pallet is clean.
+			assert_ok!(signed::Submissions::<Runtime>::ensure_killed(round));
+		});
+	}
+
+	// Fetching pages 2 and 1 but never page 0 must leave the round open: the pallet stays in
+	// the export phase, the round is not bumped, and the snapshot is retained.
+	#[test]
+	fn elect_does_not_finish_without_call_of_page_0() {
+		ExtBuilder::full().build_and_execute(|| {
+			roll_to_signed_open();
+			assert_eq!(MultiBlock::current_phase(), Phase::Signed);
+
+			// load a solution into the verifier
+			let paged = OffchainWorkerMiner::<Runtime>::mine_solution(Pages::get(), false).unwrap();
+			let score = paged.score;
+			load_signed_for_verification_and_start(99, paged, 0);
+
+			// there is no queued solution prior to the last page of the solution getting verified
+			assert_eq!(<Runtime as crate::Config>::Verifier::queued_score(), None);
+
+			// roll to the block it is finalized: one page is verified per block.
+			roll_next();
+			roll_next();
+			roll_next();
+			assert_eq!(
+				verifier_events(),
+				vec![
+					verifier::Event::Verified(2, 2),
+					verifier::Event::Verified(1, 2),
+					verifier::Event::Verified(0, 2),
+					verifier::Event::Queued(score, None),
+				]
+			);
+
+			// there is now a queued solution
+			assert_eq!(<Runtime as crate::Config>::Verifier::queued_score(), Some(score));
+
+			// not much impact, just for the sane-ness of the test.
+			roll_to_unsigned_open();
+
+			// pre-elect state:
+			assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(25));
+			assert_eq!(Round::<Runtime>::get(), 0);
+			assert_full_snapshot();
+
+			// call elect for page 2 and 1, but NOT 0
+			let solutions = (1..=MultiBlock::msp())
+				.rev() // 2, 1
+				.map(|page| {
+					crate::Pallet::<Runtime>::elect(page as PageIndex).unwrap();
+					assert!(MultiBlock::current_phase().is_export());
+				})
+				.collect::<Vec<_>>();
+			// the closure returns `()`, so `solutions` is a `Vec<()>`: only its length (the
+			// number of successful `elect` calls) is meaningful here.
+			assert_eq!(solutions.len(), 2);
+
+			// nothing changes from the prelect state, except phase is now export.
+			assert!(MultiBlock::current_phase().is_export());
+			assert_eq!(Round::<Runtime>::get(), 0);
+			assert_full_snapshot();
+		});
+	}
+
+	#[test]
+	fn when_passive_stay_in_phase_unsigned() {
+		ExtBuilder::full().build_and_execute(|| {
+			// Once the unsigned phase has begun, `on_initialize` alone will never end it;
+			// closing the phase requires an explicit call such as `elect`.
+			roll_to_unsigned_open();
+			let mut remaining = 100;
+			while remaining > 0 {
+				roll_next();
+				let phase = MultiBlock::current_phase();
+				assert!(matches!(phase, Phase::Unsigned(_)));
+				remaining -= 1;
+			}
+		});
+	}
+
+	// `elect` arrives in the middle of the signed-validation phase: the unsigned phase is
+	// never entered, yet the round closes cleanly with the already-queued signed solution.
+	#[test]
+	fn skip_unsigned_phase() {
+		ExtBuilder::full().build_and_execute(|| {
+			roll_to_signed_open();
+			assert_eq!(MultiBlock::current_phase(), Phase::Signed);
+			let round = MultiBlock::round();
+
+			// load a solution into the verifier
+			let paged = OffchainWorkerMiner::<Runtime>::mine_solution(Pages::get(), false).unwrap();
+
+			load_signed_for_verification_and_start_and_roll_to_verified(99, paged, 0);
+
+			// and right here, in the middle of the signed verification phase, we close the round.
+			// Everything should work fine.
+			assert_eq!(MultiBlock::current_phase(), Phase::SignedValidation(20));
+			assert_eq!(Round::<Runtime>::get(), 0);
+			assert_full_snapshot();
+
+			// fetch all pages.
+			// NOTE(review): `lsp()..msp()` is half-open, so this iterates 1, 0 only — page
+			// `msp` (2) is never explicitly fetched; `lsp()..=msp()` would match the
+			// "2, 1, 0" annotation — TODO confirm intent.
+			let _paged_solution = (MultiBlock::lsp()..MultiBlock::msp())
+				.rev() // NOTE(review): yields 1, 0 (see note above)
+				.map(|page| {
+					MultiBlock::elect(page as PageIndex).unwrap();
+					if page == 0 {
+						assert!(MultiBlock::current_phase().is_off())
+					} else {
+						assert!(MultiBlock::current_phase().is_export())
+					}
+				})
+				.collect::<Vec<_>>();
+
+			// round is incremented.
+			assert_eq!(MultiBlock::round(), round + 1);
+			// after elect(0) is called, verifier is cleared,
+			verifier::QueuedSolution::<Runtime>::assert_killed();
+			// the phase is off,
+			assert_eq!(MultiBlock::current_phase(), Phase::Off);
+			// the snapshot is cleared,
+			assert_storage_noop!(Snapshot::<Runtime>::kill());
+			// and signed pallet is clean.
+			assert_ok!(signed::Submissions::<Runtime>::ensure_killed(round));
+		});
+	}
+
+	// Once `elect` has fetched even a single page, no further submissions may enter: signed
+	// `submit_page`/`register` fail with `PhaseNotSigned` and unsigned pre-dispatch rejects.
+	#[test]
+	fn call_to_elect_should_prevent_any_submission() {
+		ExtBuilder::full().build_and_execute(|| {
+			roll_to_signed_open();
+			assert_eq!(MultiBlock::current_phase(), Phase::Signed);
+
+			// load a solution into the verifier
+			let paged = OffchainWorkerMiner::<Runtime>::mine_solution(Pages::get(), false).unwrap();
+			load_signed_for_verification_and_start_and_roll_to_verified(99, paged, 0);
+
+			assert_eq!(MultiBlock::current_phase(), Phase::SignedValidation(20));
+
+			// fetch one page.
+			assert!(MultiBlock::elect(MultiBlock::msp()).is_ok());
+
+			// try submit one signed page:
+			assert_noop!(
+				SignedPallet::submit_page(RuntimeOrigin::signed(999), 0, Default::default()),
+				crate::signed::Error::<Runtime>::PhaseNotSigned,
+			);
+			assert_noop!(
+				SignedPallet::register(RuntimeOrigin::signed(999), Default::default()),
+				crate::signed::Error::<Runtime>::PhaseNotSigned,
+			);
+			// unsigned transactions must also be rejected at pre-dispatch, with no storage writes.
+			assert_storage_noop!(assert!(<UnsignedPallet as ValidateUnsigned>::pre_dispatch(
+				&unsigned::Call::submit_unsigned { paged_solution: Default::default() }
+			)
+			.is_err()));
+		});
+	}
+
+	// No solution is ever submitted; with the onchain fallback each `elect(page)` call is
+	// answered by an instant onchain election over that page's snapshot.
+	#[test]
+	fn multi_page_elect_fallback_works() {
+		ExtBuilder::full().fallback_mode(FallbackModes::Onchain).build_and_execute(|| {
+			roll_to_signed_open();
+
+			// same targets, but voters from page 2 (1, 2, 3, 4, see `mock/staking`).
+			assert_eq!(
+				MultiBlock::elect(2).unwrap(),
+				BoundedSupports(bounded_vec![
+					(10, BoundedSupport { total: 15, voters: bounded_vec![(1, 10), (4, 5)] }),
+					(
+						40,
+						BoundedSupport {
+							total: 25,
+							voters: bounded_vec![(2, 10), (3, 10), (4, 5)]
+						}
+					)
+				])
+			);
+			// page 1 of voters
+			assert_eq!(
+				MultiBlock::elect(1).unwrap(),
+				BoundedSupports(bounded_vec![
+					(10, BoundedSupport { total: 15, voters: bounded_vec![(5, 5), (8, 10)] }),
+					(
+						30,
+						BoundedSupport {
+							total: 25,
+							voters: bounded_vec![(5, 5), (6, 10), (7, 10)]
+						}
+					)
+				])
+			);
+			// self votes
+			assert_eq!(
+				MultiBlock::elect(0).unwrap(),
+				BoundedSupports(bounded_vec![
+					(30, BoundedSupport { total: 30, voters: bounded_vec![(30, 30)] }),
+					(40, BoundedSupport { total: 40, voters: bounded_vec![(40, 40)] })
+				])
+			);
+
+			assert_eq!(
+				multi_block_events(),
+				vec![
+					Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(2) },
+					Event::PhaseTransitioned { from: Phase::Snapshot(0), to: Phase::Signed },
+					Event::PhaseTransitioned { from: Phase::Signed, to: Phase::Export(2) },
+					Event::PhaseTransitioned { from: Phase::Export(1), to: Phase::Off }
+				]
+			);
+			// the fallback path never touches the verifier.
+			assert_eq!(verifier_events(), vec![]);
+
+			// the onchain fallback answered every page, so we end up back in `Off` — no
+			// emergency phase is entered.
+			assert_eq!(MultiBlock::current_phase(), Phase::Off);
+		});
+	}
+
+	// Calling `elect(0)` immediately (no pages fetched beforehand) with the onchain fallback
+	// short-circuits the whole export: we jump straight from `Signed` to `Off`.
+	#[test]
+	fn multi_page_fallback_shortcut_to_msp_works() {
+		ExtBuilder::full().fallback_mode(FallbackModes::Onchain).build_and_execute(|| {
+			roll_to_signed_open();
+
+			// but then we immediately call `elect`, this will work
+			assert!(MultiBlock::elect(0).is_ok());
+
+			assert_eq!(
+				multi_block_events(),
+				vec![
+					Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(2) },
+					Event::PhaseTransitioned { from: Phase::Snapshot(0), to: Phase::Signed },
+					Event::PhaseTransitioned { from: Phase::Signed, to: Phase::Off }
+				]
+			);
+
+			// This will set us to the off phase, since fallback saved us.
+			assert_eq!(MultiBlock::current_phase(), Phase::Off);
+		});
+	}
+
+	// `elect` is only valid while an election is ongoing (snapshot created); otherwise it must
+	// return `ElectionError::NotOngoing`.
+	#[test]
+	fn elect_call_when_not_ongoing() {
+		ExtBuilder::full().fallback_mode(FallbackModes::Onchain).build_and_execute(|| {
+			roll_to_snapshot_created();
+			assert_eq!(MultiBlock::ongoing(), true);
+			assert!(MultiBlock::elect(0).is_ok());
+		});
+		ExtBuilder::full().fallback_mode(FallbackModes::Onchain).build_and_execute(|| {
+			roll_to(10);
+			assert_eq!(MultiBlock::ongoing(), false);
+			assert_eq!(MultiBlock::elect(0), Err(ElectionError::NotOngoing));
+		});
+	}
+}
+
+#[cfg(test)]
+mod admin_ops {
+	use super::*;
+	use crate::mock::*;
+	use frame_support::assert_ok;
+
+	// With the default `Emergency` fallback, a failed `elect` opens the emergency phase; root
+	// can then inject a solution via `AdminOperation::EmergencySetSolution`, after which
+	// `elect` succeeds and the phase returns to `Off`.
+	#[test]
+	fn set_solution_emergency_works() {
+		ExtBuilder::full().build_and_execute(|| {
+			roll_to_signed_open();
+
+			// we get a call to elect(0). this will cause emergency, since no fallback is allowed.
+			assert_eq!(
+				MultiBlock::elect(0),
+				Err(ElectionError::Fallback("Emergency phase started.".to_string()))
+			);
+			assert_eq!(MultiBlock::current_phase(), Phase::Emergency);
+
+			// we can now set the solution to emergency.
+			let (emergency, score) = emergency_solution();
+			assert_ok!(MultiBlock::manage(
+				RuntimeOrigin::root(),
+				AdminOperation::EmergencySetSolution(Box::new(emergency), score)
+			));
+
+			// still in emergency until the solution is actually fetched via `elect`.
+			assert_eq!(MultiBlock::current_phase(), Phase::Emergency);
+			assert_ok!(MultiBlock::elect(0));
+			assert_eq!(MultiBlock::current_phase(), Phase::Off);
+
+			assert_eq!(
+				multi_block_events(),
+				vec![
+					Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(2) },
+					Event::PhaseTransitioned { from: Phase::Snapshot(0), to: Phase::Signed },
+					Event::PhaseTransitioned { from: Phase::Signed, to: Phase::Emergency },
+					Event::PhaseTransitioned { from: Phase::Emergency, to: Phase::Off }
+				]
+			);
+			assert_eq!(
+				verifier_events(),
+				vec![verifier::Event::Queued(
+					ElectionScore { minimal_stake: 55, sum_stake: 130, sum_stake_squared: 8650 },
+					None
+				)]
+			);
+		})
+	}
+
+	// While in the emergency phase, root can re-run the (re-configured) fallback via
+	// `AdminOperation::EmergencyFallback`; the queued fallback result then lets `elect`
+	// succeed.
+	#[test]
+	fn trigger_fallback_works() {
+		ExtBuilder::full()
+			.fallback_mode(FallbackModes::Emergency)
+			.build_and_execute(|| {
+				roll_to_signed_open();
+
+				// we get a call to elect(0). this will cause emergency, since no fallback is
+				// allowed.
+				assert_eq!(
+					MultiBlock::elect(0),
+					Err(ElectionError::Fallback("Emergency phase started.".to_string()))
+				);
+				assert_eq!(MultiBlock::current_phase(), Phase::Emergency);
+
+				// we can now set the solution to emergency, assuming fallback is set to onchain
+				FallbackMode::set(FallbackModes::Onchain);
+				assert_ok!(MultiBlock::manage(
+					RuntimeOrigin::root(),
+					AdminOperation::EmergencyFallback
+				));
+
+				// still in emergency until the solution is actually fetched via `elect`.
+				assert_eq!(MultiBlock::current_phase(), Phase::Emergency);
+				assert_ok!(MultiBlock::elect(0));
+				assert_eq!(MultiBlock::current_phase(), Phase::Off);
+
+				assert_eq!(
+					multi_block_events(),
+					vec![
+						Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(2) },
+						Event::PhaseTransitioned { from: Phase::Snapshot(0), to: Phase::Signed },
+						Event::PhaseTransitioned { from: Phase::Signed, to: Phase::Emergency },
+						Event::PhaseTransitioned { from: Phase::Emergency, to: Phase::Off }
+					]
+				);
+				assert_eq!(
+					verifier_events(),
+					vec![verifier::Event::Queued(
+						ElectionScore { minimal_stake: 15, sum_stake: 40, sum_stake_squared: 850 },
+						None
+					)]
+				);
+			})
+	}
+
+	// Placeholder: `todo!()` panics, satisfying `should_panic` until the test is written.
+	#[should_panic]
+	#[test]
+	fn force_rotate_round() {
+		// clears the snapshot and verifier data.
+		// leaves the signed data as is since we bump the round.
+		todo!();
+	}
+
+	// Root can set the verifier's minimum untrusted score via
+	// `AdminOperation::SetMinUntrustedScore`.
+	#[test]
+	fn set_minimum_solution_score() {
+		ExtBuilder::full().build_and_execute(|| {
+			assert_eq!(VerifierPallet::minimum_score(), None);
+			assert_ok!(MultiBlock::manage(
+				RuntimeOrigin::root(),
+				AdminOperation::SetMinUntrustedScore(ElectionScore {
+					minimal_stake: 100,
+					..Default::default()
+				})
+			));
+			assert_eq!(
+				VerifierPallet::minimum_score().unwrap(),
+				ElectionScore { minimal_stake: 100, ..Default::default() }
+			);
+		});
+	}
+}
+
+// Placeholder snapshot tests: each body is `todo!()`, which panics and thereby satisfies its
+// `should_panic` attribute until the real test is written.
+#[cfg(test)]
+mod snapshot {
+
+	#[test]
+	#[should_panic]
+	fn fetches_exact_voters() {
+		todo!("fetches correct number of voters, based on T::VoterSnapshotPerBlock");
+	}
+
+	#[test]
+	#[should_panic]
+	fn fetches_exact_targets() {
+		todo!("fetches correct number of targets, based on T::TargetSnapshotPerBlock");
+	}
+
+	#[test]
+	#[should_panic]
+	fn fingerprint_works() {
+		todo!("one hardcoded test of the fingerprint value.");
+	}
+
+	#[test]
+	#[should_panic]
+	fn snapshot_size_2second_weight() {
+		todo!()
+	}
+}
diff --git a/substrate/frame/election-provider-multi-block/src/mock/mod.rs b/substrate/frame/election-provider-multi-block/src/mock/mod.rs
new file mode 100644
index 00000000000..5c68494f66b
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/mock/mod.rs
@@ -0,0 +1,700 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+mod signed;
+mod staking;
+mod weight_info;
+
+use super::*;
+use crate::{
+	self as multi_block,
+	signed::{self as signed_pallet, HoldReason},
+	unsigned::{
+		self as unsigned_pallet,
+		miner::{MinerConfig, OffchainMinerError, OffchainWorkerMiner},
+	},
+	verifier::{self as verifier_pallet, AsynchronousVerifier, Status},
+};
+use codec::{Decode, Encode, MaxEncodedLen};
+use frame_election_provider_support::{
+	bounds::{ElectionBounds, ElectionBoundsBuilder},
+	InstantElectionProvider, NposSolution, SequentialPhragmen,
+};
+pub use frame_support::{assert_noop, assert_ok};
+use frame_support::{
+	derive_impl, parameter_types,
+	traits::{fungible::InspectHold, Hooks},
+	weights::{constants, Weight},
+};
+use frame_system::EnsureRoot;
+use parking_lot::RwLock;
+pub use signed::*;
+use sp_core::{
+	offchain::{
+		testing::{PoolState, TestOffchainExt, TestTransactionPoolExt},
+		OffchainDbExt, OffchainWorkerExt, TransactionPoolExt,
+	},
+	ConstBool,
+};
+use sp_npos_elections::EvaluateSupport;
+use sp_runtime::{
+	bounded_vec,
+	traits::{BlakeTwo256, IdentityLookup},
+	BuildStorage, PerU16, Perbill,
+};
+pub use staking::*;
+use std::{sync::Arc, vec};
+
+// Test extrinsic type used by the mock transaction pool.
+pub type Extrinsic = sp_runtime::testing::TestXt<RuntimeCall, ()>;
+
+// Primitive aliases used throughout the mock runtime.
+pub type Balance = u64;
+pub type AccountId = u64;
+pub type BlockNumber = u64;
+// Compact index types used by the generated solution type below.
+pub type VoterIndex = u32;
+pub type TargetIndex = u16;
+
+// Mock runtime wiring the multi-block election pallet together with its three sub-pallets
+// (signed, verifier, unsigned) plus system and balances.
+frame_support::construct_runtime!(
+	pub enum Runtime  {
+		System: frame_system,
+		Balances: pallet_balances,
+		MultiBlock: multi_block,
+		SignedPallet: signed_pallet,
+		VerifierPallet: verifier_pallet,
+		UnsignedPallet: unsigned_pallet,
+	}
+);
+
+// Compact NPoS solution encoding: up to 16 votes per voter, `PerU16` accuracy, and at most
+// 2_000 voters per page.
+frame_election_provider_support::generate_solution_type!(
+	pub struct TestNposSolution::<
+		VoterIndex = VoterIndex,
+		TargetIndex = TargetIndex,
+		Accuracy = PerU16,
+		MaxVoters = ConstU32::<2_000>
+	>(16)
+);
+
+// System config: test defaults, with u64 accounts and the custom `BlockWeights` below.
+#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
+impl frame_system::Config for Runtime {
+	type Hashing = BlakeTwo256;
+	type AccountId = AccountId;
+	type Lookup = IdentityLookup<Self::AccountId>;
+	type BlockLength = ();
+	type BlockWeights = BlockWeights;
+	type AccountData = pallet_balances::AccountData<Balance>;
+	type Block = frame_system::mocking::MockBlock<Self>;
+}
+
+// 75% of the block weight is available to normal dispatch.
+const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75);
+parameter_types! {
+	pub const ExistentialDeposit: Balance = 1;
+	// 2 seconds of ref-time per block; proof size effectively unlimited.
+	pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights
+		::with_sensible_defaults(
+			Weight::from_parts(2u64 * constants::WEIGHT_REF_TIME_PER_SECOND, u64::MAX),
+			NORMAL_DISPATCH_RATIO,
+		);
+}
+
+// Balances config: test defaults; used for signed-submission deposits/holds.
+#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)]
+impl pallet_balances::Config for Runtime {
+	type Balance = Balance;
+	type DustRemoval = ();
+	type ExistentialDeposit = ExistentialDeposit;
+	type AccountStore = System;
+	type MaxLocks = ();
+	type MaxReserves = ();
+	type ReserveIdentifier = [u8; 8];
+	type WeightInfo = ();
+}
+
+// Selects what `MockFallback::instant_elect` does when the main election has no solution; see
+// the `InstantElectionProvider` impl below.
+#[allow(unused)]
+#[derive(Clone)]
+pub enum FallbackModes {
+	// Delegates to `crate::Continue`.
+	// TODO: test for this mode
+	Continue,
+	// Delegates to `crate::InitiateEmergencyPhase`.
+	Emergency,
+	// Delegates to an instant `onchain::OnChainExecution` election.
+	Onchain,
+}
+
+parameter_types! {
+	// default schedule: 3 pages, and 5 blocks each for the signed, signed-validation and
+	// unsigned phases. Individual tests override these via `ExtBuilder`.
+	pub static Pages: PageIndex = 3;
+	pub static UnsignedPhase: BlockNumber = 5;
+	pub static SignedPhase: BlockNumber = 5;
+	pub static SignedValidationPhase: BlockNumber = 5;
+
+	pub static FallbackMode: FallbackModes = FallbackModes::Emergency;
+	pub static MinerTxPriority: u64 = 100;
+	pub static SolutionImprovementThreshold: Perbill = Perbill::zero();
+	pub static OffchainRepeat: BlockNumber = 5;
+	pub static MinerMaxLength: u32 = 256;
+	pub static MaxVotesPerVoter: u32 = <TestNposSolution as NposSolution>::LIMIT as u32;
+
+	// by default we stick to 3 pages to host our 12 voters.
+	pub static VoterSnapshotPerBlock: VoterIndex = 4;
+	// and 4 targets, whom we fetch all.
+	pub static TargetSnapshotPerBlock: TargetIndex = 4;
+	pub static Lookahead: BlockNumber = 0;
+
+	// we have 12 voters in the default setting, this should be enough to make sure they are not
+	// trimmed accidentally in any test.
+	#[derive(Encode, Decode, PartialEq, Eq, Debug, scale_info::TypeInfo, MaxEncodedLen)]
+	pub static MaxBackersPerWinner: u32 = 12;
+	pub static MaxBackersPerWinnerFinal: u32 = 12;
+	// we have 4 targets in total and we desire `Desired` thereof, no single page can represent more
+	// than the min of these two.
+	#[derive(Encode, Decode, PartialEq, Eq, Debug, scale_info::TypeInfo, MaxEncodedLen)]
+	pub static MaxWinnersPerPage: u32 = (staking::Targets::get().len() as u32).min(staking::DesiredTargets::get());
+}
+
+// Verifier sub-pallet config; solutions to verify are provided by the signed sub-pallet
+// (via `DualSignedPhase`).
+impl crate::verifier::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type SolutionImprovementThreshold = SolutionImprovementThreshold;
+	type MaxBackersPerWinnerFinal = MaxBackersPerWinnerFinal;
+	type MaxBackersPerWinner = MaxBackersPerWinner;
+	type MaxWinnersPerPage = MaxWinnersPerPage;
+	type SolutionDataProvider = signed::DualSignedPhase;
+	type WeightInfo = ();
+}
+
+// Unsigned sub-pallet config; the offchain miner solves with sequential phragmen.
+impl crate::unsigned::Config for Runtime {
+	type OffchainRepeat = OffchainRepeat;
+	type MinerTxPriority = MinerTxPriority;
+	type OffchainSolver = SequentialPhragmen<Self::AccountId, Perbill>;
+	type WeightInfo = ();
+}
+
+// Miner config: mirrors the pallet's paging/bound parameters so mined solutions fit.
+impl MinerConfig for Runtime {
+	type AccountId = AccountId;
+	type Hash = <Runtime as frame_system::Config>::Hash;
+	type MaxLength = MinerMaxLength;
+	type Pages = Pages;
+	type MaxVotesPerVoter = MaxVotesPerVoter;
+	type Solution = TestNposSolution;
+	type Solver = SequentialPhragmen<AccountId, Perbill>;
+	type TargetSnapshotPerBlock = TargetSnapshotPerBlock;
+	type VoterSnapshotPerBlock = VoterSnapshotPerBlock;
+	type MaxBackersPerWinner = MaxBackersPerWinner;
+	type MaxBackersPerWinnerFinal = MaxBackersPerWinnerFinal;
+	type MaxWinnersPerPage = MaxWinnersPerPage;
+}
+
+// Main multi-block pallet config: data from the mock staking pallet, fallback selected at
+// runtime by `FallbackMode`, verification delegated to the verifier sub-pallet.
+impl crate::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type SignedPhase = SignedPhase;
+	type SignedValidationPhase = SignedValidationPhase;
+	type UnsignedPhase = UnsignedPhase;
+	type DataProvider = staking::MockStaking;
+	type Fallback = MockFallback;
+	type TargetSnapshotPerBlock = TargetSnapshotPerBlock;
+	type VoterSnapshotPerBlock = VoterSnapshotPerBlock;
+	type Lookahead = Lookahead;
+	type MinerConfig = Self;
+	type WeightInfo = weight_info::DualMockWeightInfo;
+	type Verifier = VerifierPallet;
+	type AdminOrigin = EnsureRoot<AccountId>;
+	type Pages = Pages;
+}
+
+parameter_types! {
+	// default builder: no explicit count/size bounds on the onchain fallback election.
+	pub static OnChainElectionBounds: ElectionBounds = ElectionBoundsBuilder::default().build();
+}
+
+/// Onchain election configuration, used by `MockFallback` when `FallbackModes::Onchain` is set.
+impl onchain::Config for Runtime {
+	type DataProvider = staking::MockStaking;
+	type MaxBackersPerWinner = MaxBackersPerWinner;
+	type MaxWinnersPerPage = MaxWinnersPerPage;
+	type Sort = ConstBool<true>;
+	type Solver = SequentialPhragmen<AccountId, sp_runtime::PerU16, ()>;
+	type System = Runtime;
+	type WeightInfo = ();
+	type Bounds = OnChainElectionBounds;
+}
+
+/// A fallback election provider whose behavior is controlled by the `FallbackMode` static.
+///
+/// Only the instant (single-shot) interface is meaningful; the paged `elect` is never expected to
+/// be called in these tests.
+pub struct MockFallback;
+impl ElectionProvider for MockFallback {
+	type AccountId = AccountId;
+	type BlockNumber = u64;
+	type Error = String;
+	type DataProvider = staking::MockStaking;
+	type Pages = ConstU32<1>;
+	type MaxBackersPerWinner = MaxBackersPerWinner;
+	type MaxWinnersPerPage = MaxWinnersPerPage;
+
+	fn elect(_remaining: PageIndex) -> Result<BoundedSupportsOf<Self>, Self::Error> {
+		// paged election must never be invoked on the fallback in these tests.
+		unreachable!()
+	}
+
+	fn ongoing() -> bool {
+		false
+	}
+}
+
+impl InstantElectionProvider for MockFallback {
+	fn instant_elect(
+		voters: Vec<VoterOf<Runtime>>,
+		targets: Vec<Self::AccountId>,
+		desired_targets: u32,
+	) -> Result<BoundedSupportsOf<Self>, Self::Error> {
+		// dispatch to the configured fallback strategy; errors are stringified into `Self::Error`.
+		match FallbackMode::get() {
+			FallbackModes::Continue =>
+				crate::Continue::<Runtime>::instant_elect(voters, targets, desired_targets)
+					.map_err(|x| x.to_string()),
+			FallbackModes::Emergency => crate::InitiateEmergencyPhase::<Runtime>::instant_elect(
+				voters,
+				targets,
+				desired_targets,
+			)
+			.map_err(|x| x.to_string()),
+			FallbackModes::Onchain => onchain::OnChainExecution::<Runtime>::instant_elect(
+				voters,
+				targets,
+				desired_targets,
+			)
+			.map_err(|e| format!("onchain fallback failed: {:?}", e)),
+		}
+	}
+	fn bother() -> bool {
+		// only the onchain mode actually needs the voter/target input.
+		matches!(FallbackMode::get(), FallbackModes::Onchain)
+	}
+}
+
+/// Allows the mock runtime to turn local calls into runtime calls/extrinsics.
+impl<LocalCall> frame_system::offchain::CreateTransactionBase<LocalCall> for Runtime
+where
+	RuntimeCall: From<LocalCall>,
+{
+	type RuntimeCall = RuntimeCall;
+	type Extrinsic = Extrinsic;
+}
+
+/// Creates bare (unsigned/inherent) extrinsics, as submitted by the offchain miner.
+impl<LocalCall> frame_system::offchain::CreateInherent<LocalCall> for Runtime
+where
+	RuntimeCall: From<LocalCall>,
+{
+	fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic {
+		Extrinsic::new_bare(call)
+	}
+}
+
+/// Test externalities builder for the multi-block election mock runtime.
+///
+/// The named constructors below pre-configure the phase statics for the sub-pallet under test;
+/// further knobs are provided by the builder methods in the next `impl` block.
+pub struct ExtBuilder {}
+
+impl ExtBuilder {
+	/// All phases enabled with their default values.
+	pub fn full() -> Self {
+		Self {}
+	}
+
+	/// For verifier tests: no signed phases, signed data provider mocked.
+	pub fn verifier() -> Self {
+		SignedPhase::set(0);
+		SignedValidationPhase::set(0);
+		signed::SignedPhaseSwitch::set(signed::SignedSwitch::Mock);
+		Self {}
+	}
+
+	/// For unsigned tests: currently identical to [`Self::verifier`] (signed phases disabled,
+	/// signed data provider mocked).
+	pub fn unsigned() -> Self {
+		SignedPhase::set(0);
+		SignedValidationPhase::set(0);
+		signed::SignedPhaseSwitch::set(signed::SignedSwitch::Mock);
+		Self {}
+	}
+
+	/// For signed tests: unsigned phase disabled.
+	pub fn signed() -> Self {
+		UnsignedPhase::set(0);
+		Self {}
+	}
+}
+
+impl ExtBuilder {
+	// The following setters mutate the corresponding `parameter_types!` statics and return `self`
+	// for chaining.
+	pub(crate) fn max_backers_per_winner(self, c: u32) -> Self {
+		MaxBackersPerWinner::set(c);
+		self
+	}
+	pub(crate) fn max_backers_per_winner_final(self, c: u32) -> Self {
+		MaxBackersPerWinnerFinal::set(c);
+		self
+	}
+	pub(crate) fn miner_tx_priority(self, p: u64) -> Self {
+		MinerTxPriority::set(p);
+		self
+	}
+	pub(crate) fn solution_improvement_threshold(self, p: Perbill) -> Self {
+		SolutionImprovementThreshold::set(p);
+		self
+	}
+	pub(crate) fn pages(self, pages: PageIndex) -> Self {
+		Pages::set(pages);
+		self
+	}
+	pub(crate) fn lookahead(self, lookahead: BlockNumber) -> Self {
+		Lookahead::set(lookahead);
+		self
+	}
+	pub(crate) fn voter_per_page(self, count: u32) -> Self {
+		VoterSnapshotPerBlock::set(count);
+		self
+	}
+	pub(crate) fn miner_max_length(self, len: u32) -> Self {
+		MinerMaxLength::set(len);
+		self
+	}
+	pub(crate) fn desired_targets(self, t: u32) -> Self {
+		staking::DesiredTargets::set(t);
+		self
+	}
+	pub(crate) fn signed_phase(self, d: BlockNumber, v: BlockNumber) -> Self {
+		SignedPhase::set(d);
+		SignedValidationPhase::set(v);
+		self
+	}
+	pub(crate) fn unsigned_phase(self, d: BlockNumber) -> Self {
+		UnsignedPhase::set(d);
+		self
+	}
+	pub(crate) fn signed_validation_phase(self, d: BlockNumber) -> Self {
+		SignedValidationPhase::set(d);
+		self
+	}
+	/// Append a voter to the staking mock's voter list.
+	#[allow(unused)]
+	pub(crate) fn add_voter(self, who: AccountId, stake: Balance, targets: Vec<AccountId>) -> Self {
+		staking::VOTERS.with(|v| v.borrow_mut().push((who, stake, targets.try_into().unwrap())));
+		self
+	}
+	pub(crate) fn fallback_mode(self, mode: FallbackModes) -> Self {
+		FallbackMode::set(mode);
+		self
+	}
+	/// Build the externalities without running any post-test sanity checks.
+	pub(crate) fn build_unchecked(self) -> sp_io::TestExternalities {
+		sp_tracing::try_init_simple();
+		let mut storage =
+			frame_system::GenesisConfig::<Runtime>::default().build_storage().unwrap();
+
+		let _ = pallet_balances::GenesisConfig::<Runtime> {
+			balances: vec![
+				// bunch of account for submitting stuff only.
+				(91, 100),
+				(92, 100),
+				(93, 100),
+				(94, 100),
+				(95, 100),
+				(96, 100),
+				(97, 100),
+				(99, 100),
+				(999, 100),
+				(9999, 100),
+			],
+			..Default::default()
+		}
+		.assimilate_storage(&mut storage);
+
+		sp_io::TestExternalities::from(storage)
+	}
+
+	/// Warning: this does not execute the post-sanity-checks.
+	pub(crate) fn build_offchainify(self) -> (sp_io::TestExternalities, Arc<RwLock<PoolState>>) {
+		let mut ext = self.build_unchecked();
+		let (offchain, _offchain_state) = TestOffchainExt::new();
+		let (pool, pool_state) = TestTransactionPoolExt::new();
+
+		// register offchain db/worker and transaction pool extensions so OCWs can run.
+		ext.register_extension(OffchainDbExt::new(offchain.clone()));
+		ext.register_extension(OffchainWorkerExt::new(offchain));
+		ext.register_extension(TransactionPoolExt::new(pool));
+
+		(ext, pool_state)
+	}
+
+	/// Build the externalities, and execute the given `test` closure with it.
+	pub(crate) fn build_and_execute(self, test: impl FnOnce() -> ()) {
+		let mut ext = self.build_unchecked();
+		ext.execute_with_sanity_checks(test);
+	}
+}
+
+/// Extension trait: run a test closure, then run the pallets' try-state checks.
+pub trait ExecuteWithSanityChecks {
+	fn execute_with_sanity_checks(&mut self, test: impl FnOnce() -> ());
+}
+
+impl ExecuteWithSanityChecks for sp_io::TestExternalities {
+	fn execute_with_sanity_checks(&mut self, test: impl FnOnce() -> ()) {
+		self.execute_with(test);
+		// the sanity checks run in a second `execute_with`, after the test itself finished.
+		self.execute_with(all_pallets_sanity_checks)
+	}
+}
+
+/// Run the `do_try_state` invariant checks of every pallet at the current block, panicking on the
+/// first failure.
+fn all_pallets_sanity_checks() {
+	let now = System::block_number();
+	VerifierPallet::do_try_state(now).unwrap();
+	UnsignedPallet::do_try_state(now).unwrap();
+	MultiBlock::do_try_state(now).unwrap();
+	SignedPallet::do_try_state(now).unwrap();
+}
+
+/// Fully verify a solution.
+///
+/// This will progress the blocks until the verifier pallet is done verifying it.
+///
+/// The solution must have already been loaded via `load_and_start_verification`.
+///
+/// Return the final supports, which is the outcome. If this succeeds, then the valid variant of the
+/// `QueuedSolution` form `verifier` is ready to be read.
+pub fn roll_to_full_verification() -> Vec<BoundedSupportsOf<MultiBlock>> {
+	// we must be ready to verify, starting from the most significant page.
+	assert_eq!(VerifierPallet::status(), Status::Ongoing(Pages::get() - 1));
+
+	// roll one block at a time until the verifier is no longer ongoing.
+	while matches!(VerifierPallet::status(), Status::Ongoing(_)) {
+		roll_to(System::block_number() + 1);
+	}
+
+	// collect the queued pages, least significant first; missing pages become default (empty).
+	(MultiBlock::lsp()..=MultiBlock::msp())
+		.map(|p| VerifierPallet::get_queued_solution_page(p).unwrap_or_default())
+		.collect::<Vec<_>>()
+}
+
+/// Generate a single page of `TestNposSolution` from the given supports.
+///
+/// All of the voters in this support must live in a single page of the snapshot, noted by
+/// `snapshot_page`.
+pub fn solution_from_supports(
+	supports: sp_npos_elections::Supports<AccountId>,
+	snapshot_page: PageIndex,
+) -> TestNposSolution {
+	// convert supports -> staked assignments -> normalized ratio assignments.
+	let staked = sp_npos_elections::supports_to_staked_assignment(supports);
+	let assignments = sp_npos_elections::assignment_staked_to_ratio_normalized(staked).unwrap();
+
+	// index functions are built against the snapshot page that the voters live in.
+	let voters = crate::Snapshot::<Runtime>::voters(snapshot_page).unwrap();
+	let targets = crate::Snapshot::<Runtime>::targets().unwrap();
+	let voter_index = helpers::voter_index_fn_linear::<Runtime>(&voters);
+	let target_index = helpers::target_index_fn_linear::<Runtime>(&targets);
+
+	TestNposSolution::from_assignment(&assignments, &voter_index, &target_index).unwrap()
+}
+
+/// Generate a raw paged solution from the given vector of supports.
+///
+/// Given vector must be aligned with the snapshot, at most need to be 'pagified' which we do
+/// internally.
+pub fn raw_paged_from_supports(
+	paged_supports: Vec<sp_npos_elections::Supports<AccountId>>,
+	round: u32,
+) -> PagedRawSolution<Runtime> {
+	// the score is evaluated over the flattened (all-pages) supports.
+	let score = {
+		let flattened = paged_supports.iter().cloned().flatten().collect::<Vec<_>>();
+		flattened.evaluate()
+	};
+
+	// convert each page of supports into a solution page, aligned with its snapshot page index.
+	let solution_pages = paged_supports
+		.pagify(Pages::get())
+		.map(|(page_index, page_support)| solution_from_supports(page_support.to_vec(), page_index))
+		.collect::<Vec<_>>();
+
+	let solution_pages = solution_pages.try_into().unwrap();
+	PagedRawSolution { solution_pages, score, round }
+}
+
+/// Ensure that the snapshot fully exists.
+///
+/// NOTE: this should not be used that often, because we check snapshot in sanity checks, which are
+/// called ALL THE TIME.
+pub fn assert_full_snapshot() {
+	assert_ok!(Snapshot::<Runtime>::ensure_snapshot(true, Pages::get()));
+}
+
+/// Ensure that no snapshot exists.
+///
+/// NOTE: this should not be used that often, because we check snapshot in sanity checks, which are
+/// called ALL THE TIME.
+pub fn assert_none_snapshot() {
+	assert_ok!(Snapshot::<Runtime>::ensure_snapshot(false, Pages::get()));
+}
+
+/// Simple wrapper for mining a new solution. Just more handy in case the interface of mine solution
+/// changes.
+///
+/// For testing, we never want to do reduce.
+pub fn mine_full_solution() -> Result<PagedRawSolution<Runtime>, OffchainMinerError<Runtime>> {
+	// `false` disables the reduce step.
+	OffchainWorkerMiner::<Runtime>::mine_solution(Pages::get(), false)
+}
+
+/// Same as [`mine_full_solution`] but with custom pages.
+pub fn mine_solution(
+	pages: PageIndex,
+) -> Result<PagedRawSolution<Runtime>, OffchainMinerError<Runtime>> {
+	OffchainWorkerMiner::<Runtime>::mine_solution(pages, false)
+}
+
+/// Assert that `count` voters exist across `pages` number of pages.
+pub fn ensure_voters(pages: PageIndex, count: usize) {
+	assert_eq!(crate::Snapshot::<Runtime>::voter_pages(), pages);
+	assert_eq!(crate::Snapshot::<Runtime>::voters_iter_flattened().count(), count);
+}
+
+/// Assert that `count` targets exist across `pages` number of pages.
+pub fn ensure_targets(pages: PageIndex, count: usize) {
+	assert_eq!(crate::Snapshot::<Runtime>::target_pages(), pages);
+	assert_eq!(crate::Snapshot::<Runtime>::targets().unwrap().len(), count);
+}
+
+/// All events emitted by the multi-block pallet so far in this test.
+pub fn multi_block_events() -> Vec<crate::Event<Runtime>> {
+	System::events()
+		.into_iter()
+		.filter_map(|record| match record.event {
+			RuntimeEvent::MultiBlock(inner) => Some(inner),
+			_ => None,
+		})
+		.collect()
+}
+
+/// All events emitted by the verifier pallet so far in this test.
+pub fn verifier_events() -> Vec<crate::verifier::Event<Runtime>> {
+	System::events()
+		.into_iter()
+		.filter_map(|record| match record.event {
+			RuntimeEvent::VerifierPallet(inner) => Some(inner),
+			_ => None,
+		})
+		.collect()
+}
+
+/// Proceed block number to `n`.
+pub fn roll_to(n: BlockNumber) {
+	crate::Pallet::<Runtime>::roll_to(
+		n,
+		// only drive the real signed pallet's hooks when it is not being mocked.
+		matches!(SignedPhaseSwitch::get(), SignedSwitch::Real),
+		true,
+	);
+}
+
+/// Proceed block number to whenever the snapshot is fully created (`Phase::Snapshot(0)`).
+pub fn roll_to_snapshot_created() {
+	while !matches!(MultiBlock::current_phase(), Phase::Snapshot(0)) {
+		roll_next()
+	}
+	assert_full_snapshot();
+}
+
+/// Proceed block number to whenever the unsigned phase is open (`Phase::Unsigned(_)`).
+pub fn roll_to_unsigned_open() {
+	while !matches!(MultiBlock::current_phase(), Phase::Unsigned(_)) {
+		roll_next()
+	}
+}
+
+/// Proceed block number to whenever the signed phase is open (`Phase::Signed`).
+pub fn roll_to_signed_open() {
+	while !matches!(MultiBlock::current_phase(), Phase::Signed) {
+		roll_next();
+	}
+}
+
+/// Proceed block number to whenever the signed validation phase is open
+/// (`Phase::SignedValidation(_)`).
+pub fn roll_to_signed_validation_open() {
+	while !matches!(MultiBlock::current_phase(), Phase::SignedValidation(_)) {
+		roll_next()
+	}
+}
+
+/// Proceed exactly one block.
+pub fn roll_next() {
+	let next = System::block_number() + 1;
+	roll_to(next);
+}
+
+/// Proceed exactly one block, also executing offchain workers.
+pub fn roll_next_with_ocw(maybe_pool: Option<Arc<RwLock<PoolState>>>) {
+	let next = System::block_number() + 1;
+	roll_to_with_ocw(next, maybe_pool)
+}
+
+/// Proceed block number to `n`, while running all offchain workers as well.
+///
+/// If `maybe_pool` is given, any transaction queued by a previous block's OCW is decoded and
+/// dispatched (with `None` origin) before the next block is initialized.
+pub fn roll_to_with_ocw(n: BlockNumber, maybe_pool: Option<Arc<RwLock<PoolState>>>) {
+	use sp_runtime::traits::Dispatchable;
+	let now = System::block_number();
+	for i in now + 1..=n {
+		// check the offchain transaction pool, and if anything's there, submit it.
+		if let Some(ref pool) = maybe_pool {
+			pool.read()
+				.transactions
+				.clone()
+				.into_iter()
+				.map(|uxt| <Extrinsic as codec::Decode>::decode(&mut &*uxt).unwrap())
+				.for_each(|xt| {
+					xt.function.dispatch(frame_system::RawOrigin::None.into()).unwrap();
+				});
+			// drain the pool so the same extrinsics are not dispatched twice.
+			pool.try_write().unwrap().transactions.clear();
+		}
+
+		System::set_block_number(i);
+
+		// on_initialize of all pallets, then their offchain workers; the signed pallet only
+		// participates when it is not being mocked.
+		MultiBlock::on_initialize(i);
+		VerifierPallet::on_initialize(i);
+		UnsignedPallet::on_initialize(i);
+		if matches!(SignedPhaseSwitch::get(), SignedSwitch::Real) {
+			SignedPallet::on_initialize(i);
+		}
+
+		MultiBlock::offchain_worker(i);
+		VerifierPallet::offchain_worker(i);
+		UnsignedPallet::offchain_worker(i);
+		if matches!(SignedPhaseSwitch::get(), SignedSwitch::Real) {
+			SignedPallet::offchain_worker(i);
+		}
+
+		// invariants must hold at the end of each block.
+		all_pallets_sanity_checks()
+	}
+}
+
+/// An invalid solution with any score.
+pub fn fake_solution(score: ElectionScore) -> PagedRawSolution<Runtime> {
+	PagedRawSolution {
+		score,
+		// a single default (empty) page; the rest of the fields are default too.
+		solution_pages: bounded_vec![Default::default()],
+		..Default::default()
+	}
+}
+
+/// A real solution that's valid, but has a really bad score.
+///
+/// This is different from `solution_from_supports` in that it does not require the snapshot to
+/// exist.
+// TODO: probably deprecate this.
+pub fn raw_paged_solution_low_score() -> PagedRawSolution<Runtime> {
+	PagedRawSolution {
+		solution_pages: vec![TestNposSolution {
+			// 2 targets, both voting for themselves
+			votes1: vec![(0, 0), (1, 2)],
+			..Default::default()
+		}]
+		.try_into()
+		.unwrap(),
+		round: 0,
+		score: ElectionScore { minimal_stake: 10, sum_stake: 20, sum_stake_squared: 200 },
+	}
+}
+
+/// Get the free and held balance of `who`.
+///
+/// The held balance is only the amount held under the `SignedSubmission` hold reason.
+pub fn balances(who: AccountId) -> (Balance, Balance) {
+	(
+		Balances::free_balance(who),
+		Balances::balance_on_hold(&HoldReason::SignedSubmission.into(), &who),
+	)
+}
+
+/// Election bounds based on just the given count.
+pub fn bound_by_count(count: Option<u32>) -> DataProviderBounds {
+	DataProviderBounds { count: count.map(|x| x.into()), size: None }
+}
+
+/// Run the onchain election over page 0 and return its supports alongside their score.
+pub fn emergency_solution() -> (BoundedSupportsOf<MultiBlock>, ElectionScore) {
+	let supports = onchain::OnChainExecution::<Runtime>::elect(0).unwrap();
+	let score = supports.evaluate();
+	(supports, score)
+}
diff --git a/substrate/frame/election-provider-multi-block/src/mock/signed.rs b/substrate/frame/election-provider-multi-block/src/mock/signed.rs
new file mode 100644
index 00000000000..33436374cd1
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/mock/signed.rs
@@ -0,0 +1,255 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::{Balance, Balances, Pages, Runtime, RuntimeEvent, SignedPallet, System};
+use crate::{
+	mock::{
+		balances, multi_block_events, roll_next, roll_to_signed_validation_open, verifier_events,
+		AccountId, RuntimeHoldReason, RuntimeOrigin, VerifierPallet,
+	},
+	signed::{self as signed_pallet, Event as SignedEvent, Submissions},
+	unsigned::miner::MinerConfig,
+	verifier::{self, AsynchronousVerifier, SolutionDataProvider, VerificationResult, Verifier},
+	Event, PadSolutionPages, PagedRawSolution, Pagify, Phase, SolutionOf,
+};
+use frame_election_provider_support::PageIndex;
+use frame_support::{
+	assert_ok, dispatch::PostDispatchInfo, parameter_types, traits::EstimateCallFee, BoundedVec,
+};
+use sp_npos_elections::ElectionScore;
+use sp_runtime::{traits::Zero, Perbill};
+
+parameter_types! {
+	// the solution pages that `MockSignedPhase` will serve, if any.
+	pub static MockSignedNextSolution: Option<BoundedVec<SolutionOf<Runtime>, Pages>> = None;
+	// the claimed score that `MockSignedPhase` will serve, if any.
+	pub static MockSignedNextScore: Option<ElectionScore> = Default::default();
+	// verification results reported back into the mock, newest last.
+	pub static MockSignedResults: Vec<VerificationResult> = Default::default();
+}
+
+/// A simple implementation of the signed phase that can be controller by some static variables
+/// directly.
+///
+/// Useful for when you don't care too much about the signed phase.
+pub struct MockSignedPhase;
+impl SolutionDataProvider for MockSignedPhase {
+	type Solution = <Runtime as MinerConfig>::Solution;
+	fn get_page(page: PageIndex) -> Option<Self::Solution> {
+		// out-of-range pages fall back to a default (empty) solution page.
+		MockSignedNextSolution::get().map(|i| i.get(page as usize).cloned().unwrap_or_default())
+	}
+
+	fn get_score() -> Option<ElectionScore> {
+		MockSignedNextScore::get()
+	}
+
+	fn report_result(result: verifier::VerificationResult) {
+		// append to the thread-local backing `MockSignedResults`.
+		MOCK_SIGNED_RESULTS.with(|r| r.borrow_mut().push(result));
+	}
+}
+
+/// A call-fee estimator that always returns a flat fee of 1.
+pub struct FixedCallFee;
+impl EstimateCallFee<signed_pallet::Call<Runtime>, Balance> for FixedCallFee {
+	fn estimate_call_fee(_: &signed_pallet::Call<Runtime>, _: PostDispatchInfo) -> Balance {
+		1
+	}
+}
+
+parameter_types! {
+	pub static SignedDepositBase: Balance = 5;
+	pub static SignedDepositPerPage: Balance = 1;
+	pub static SignedMaxSubmissions: u32 = 3;
+	pub static SignedRewardBase: Balance = 3;
+	// which signed-phase implementation the mock runtime uses; see `SignedSwitch`.
+	pub static SignedPhaseSwitch: SignedSwitch = SignedSwitch::Real;
+	pub static BailoutGraceRatio: Perbill = Perbill::from_percent(20);
+}
+
+/// Signed pallet configuration for the mock runtime.
+impl crate::signed::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type RuntimeHoldReason = RuntimeHoldReason;
+	type Currency = Balances;
+	type DepositBase = SignedDepositBase;
+	type DepositPerPage = SignedDepositPerPage;
+	type EstimateCallFee = FixedCallFee;
+	type MaxSubmissions = SignedMaxSubmissions;
+	type RewardBase = SignedRewardBase;
+	type BailoutGraceRatio = BailoutGraceRatio;
+	type WeightInfo = ();
+}
+
+/// Control which signed phase is being used.
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub enum SignedSwitch {
+	/// Serve data from the `MockSignedNext*` statics.
+	Mock,
+	/// Serve data from the real signed pallet.
+	Real,
+}
+
+/// A `SolutionDataProvider` that delegates to either the mock or the real signed pallet,
+/// depending on the `SignedPhaseSwitch` static.
+pub struct DualSignedPhase;
+impl SolutionDataProvider for DualSignedPhase {
+	type Solution = <Runtime as MinerConfig>::Solution;
+	fn get_page(page: PageIndex) -> Option<Self::Solution> {
+		match SignedPhaseSwitch::get() {
+			SignedSwitch::Mock => MockSignedNextSolution::get()
+				.map(|i| i.get(page as usize).cloned().unwrap_or_default()),
+			SignedSwitch::Real => SignedPallet::get_page(page),
+		}
+	}
+
+	fn get_score() -> Option<ElectionScore> {
+		match SignedPhaseSwitch::get() {
+			SignedSwitch::Mock => MockSignedNextScore::get(),
+			SignedSwitch::Real => SignedPallet::get_score(),
+		}
+	}
+
+	fn report_result(result: verifier::VerificationResult) {
+		match SignedPhaseSwitch::get() {
+			SignedSwitch::Mock => MOCK_SIGNED_RESULTS.with(|r| r.borrow_mut().push(result)),
+			SignedSwitch::Real => SignedPallet::report_result(result),
+		}
+	}
+}
+
+/// Get the events of the signed pallet.
+pub fn signed_events() -> Vec<crate::signed::Event<Runtime>> {
+	System::events()
+		.into_iter()
+		.map(|r| r.event)
+		.filter_map(|e| if let RuntimeEvent::SignedPallet(inner) = e { Some(inner) } else { None })
+		.collect::<Vec<_>>()
+}
+
+/// Load a signed solution into its pallet.
+///
+/// Registers `who`'s score, submits all pages, and asserts the emitted events and the held
+/// deposit (base + per-page) along the way.
+pub fn load_signed_for_verification(who: AccountId, paged: PagedRawSolution<Runtime>) {
+	let initial_balance = Balances::free_balance(&who);
+	// nothing must be held for `who` yet.
+	assert_eq!(balances(who), (initial_balance, 0));
+
+	assert_ok!(SignedPallet::register(RuntimeOrigin::signed(who), paged.score));
+
+	// registration holds the base deposit.
+	assert_eq!(
+		balances(who),
+		(initial_balance - SignedDepositBase::get(), SignedDepositBase::get())
+	);
+
+	// submit every page of the solution.
+	for (page_index, solution_page) in paged.solution_pages.pagify(Pages::get()) {
+		assert_ok!(SignedPallet::submit_page(
+			RuntimeOrigin::signed(who),
+			page_index,
+			Some(Box::new(solution_page.clone()))
+		));
+	}
+
+	// newest events first: one `Stored` per page, preceded by the `Registered` event.
+	let mut events = signed_events();
+	for _ in 0..Pages::get() {
+		let event = events.pop().unwrap();
+		assert!(matches!(event, SignedEvent::Stored(_, x, _) if x == who))
+	}
+	assert!(matches!(events.pop().unwrap(), SignedEvent::Registered(_, x, _) if x == who));
+
+	// the full deposit (base + one per page) must now be held.
+	let full_deposit =
+		SignedDepositBase::get() + (Pages::get() as Balance) * SignedDepositPerPage::get();
+	assert_eq!(balances(who), (initial_balance - full_deposit, full_deposit));
+}
+
+/// Same as [`load_signed_for_verification`], but also goes forward to the beginning of the signed
+/// verification phase.
+pub fn load_signed_for_verification_and_start(
+	who: AccountId,
+	paged: PagedRawSolution<Runtime>,
+	_round: u32,
+) {
+	load_signed_for_verification(who, paged);
+
+	// now the solution should start being verified.
+	roll_to_signed_validation_open();
+	// NOTE: these expected events are hardcoded for the default mock configuration
+	// (3 pages, `SignedValidation(20)`).
+	assert_eq!(
+		multi_block_events(),
+		vec![
+			Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(2) },
+			Event::PhaseTransitioned { from: Phase::Snapshot(0), to: Phase::Signed },
+			Event::PhaseTransitioned { from: Phase::Signed, to: Phase::SignedValidation(20) }
+		]
+	);
+	assert_eq!(verifier_events(), vec![]);
+}
+
+/// Same as [`load_signed_for_verification_and_start`], but also goes forward enough blocks for the
+/// solution to be verified, assuming it is all correct.
+///
+/// In other words, it goes [`Pages`] blocks forward.
+pub fn load_signed_for_verification_and_start_and_roll_to_verified(
+	who: AccountId,
+	paged: PagedRawSolution<Runtime>,
+	_round: u32,
+) {
+	load_signed_for_verification(who, paged.clone());
+
+	// now the solution should start being verified.
+	roll_to_signed_validation_open();
+	assert_eq!(
+		multi_block_events(),
+		vec![
+			Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(2) },
+			Event::PhaseTransitioned { from: Phase::Snapshot(0), to: Phase::Signed },
+			Event::PhaseTransitioned { from: Phase::Signed, to: Phase::SignedValidation(20) }
+		]
+	);
+	assert_eq!(verifier_events(), vec![]);
+
+	// there is no queued solution prior to the last page of the solution getting verified
+	assert_eq!(<Runtime as crate::Config>::Verifier::queued_score(), None);
+
+	// roll to the block it is finalized.
+	for _ in 0..Pages::get() {
+		roll_next();
+	}
+
+	assert_eq!(
+		verifier_events(),
+		vec![
+			// TODO: these are hardcoded for 3 page.
+			verifier::Event::Verified(2, 2),
+			verifier::Event::Verified(1, 2),
+			verifier::Event::Verified(0, 2),
+			verifier::Event::Queued(paged.score, None),
+		]
+	);
+
+	// there is now a queued solution.
+	assert_eq!(<Runtime as crate::Config>::Verifier::queued_score(), Some(paged.score));
+}
+
+/// Load a full raw paged solution for verification.
+///
+/// More or less the equivalent of `load_signed_for_verification_and_start`, but when
+/// `SignedSwitch::Mock` is set.
+pub fn load_mock_signed_and_start(raw_paged: PagedRawSolution<Runtime>) {
+	assert_eq!(
+		SignedPhaseSwitch::get(),
+		SignedSwitch::Mock,
+		"you should not use this if mock phase is not being mocked"
+	);
+	// pad the solution to the full page count before handing it to the mock provider.
+	MockSignedNextSolution::set(Some(raw_paged.solution_pages.pad_solution_pages(Pages::get())));
+	MockSignedNextScore::set(Some(raw_paged.score));
+
+	// Let's gooooo!
+	assert_ok!(<VerifierPallet as AsynchronousVerifier>::start());
+}
+
+/// Ensure that no submission data exists in `round` for `who`: not on the leaderboard, no
+/// metadata, and no stored pages.
+pub fn assert_no_data_for(round: u32, who: AccountId) {
+	assert!(!Submissions::<Runtime>::leaderboard(round).into_iter().any(|(x, _)| x == who));
+	assert!(Submissions::<Runtime>::metadata_of(round, who).is_none());
+	assert!(Submissions::<Runtime>::pages_of(round, who).count().is_zero());
+}
diff --git a/substrate/frame/election-provider-multi-block/src/mock/staking.rs b/substrate/frame/election-provider-multi-block/src/mock/staking.rs
new file mode 100644
index 00000000000..bb4adb4d297
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/mock/staking.rs
@@ -0,0 +1,238 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::{AccountId, MaxVotesPerVoter, Runtime};
+use crate::VoterOf;
+use frame_election_provider_support::{
+	data_provider, DataProviderBounds, ElectionDataProvider, PageIndex, VoteWeight,
+};
+use frame_support::pallet_prelude::*;
+use sp_core::bounded_vec;
+use sp_std::prelude::*;
+
+/// Convenience alias for the mock runtime.
+pub type T = Runtime;
+
+frame_support::parameter_types! {
+	pub static Targets: Vec<AccountId> = vec![10, 20, 30, 40];
+	// 12 voters, laid out as 3 snapshot pages of 4 voters each (see page markers below).
+	pub static Voters: Vec<VoterOf<Runtime>> = vec![
+		// page 2:
+		(1, 10, bounded_vec![10, 20]),
+		(2, 10, bounded_vec![30, 40]),
+		(3, 10, bounded_vec![40]),
+		(4, 10, bounded_vec![10, 20, 40]),
+		// page 1:
+		(5, 10, bounded_vec![10, 30, 40]),
+		(6, 10, bounded_vec![20, 30, 40]),
+		(7, 10, bounded_vec![20, 30]),
+		(8, 10, bounded_vec![10]),
+		// page 0: (self-votes)
+		(10, 10, bounded_vec![10]),
+		(20, 20, bounded_vec![20]),
+		(30, 30, bounded_vec![30]),
+		(40, 40, bounded_vec![40]),
+	];
+	pub static DesiredTargets: u32 = 2;
+	pub static EpochLength: u64 = 30;
+
+	// resume point for multi-page voter iteration in `MockStaking::electing_voters`.
+	pub static LastIteratedVoterIndex: Option<usize> = None;
+}
+
+/// A mock `ElectionDataProvider` backed by the `Targets`/`Voters` statics.
+///
+/// Voters are served in pages (resuming via `LastIteratedVoterIndex`); targets are always served
+/// as a single page.
+pub struct MockStaking;
+impl ElectionDataProvider for MockStaking {
+	type AccountId = AccountId;
+	type BlockNumber = u64;
+	type MaxVotesPerVoter = MaxVotesPerVoter;
+
+	fn electable_targets(
+		bounds: DataProviderBounds,
+		remaining: PageIndex,
+	) -> data_provider::Result<Vec<AccountId>> {
+		let targets = Targets::get();
+
+		// targets are single-page: any requested page returns the same full list.
+		if remaining != 0 {
+			crate::log!(
+				warn,
+				"requesting targets for non-zero page, we will return the same page in any case"
+			);
+		}
+		// error out if the full target list does not fit the given bounds.
+		if bounds.slice_exhausted(&targets) {
+			return Err("Targets too big")
+		}
+
+		Ok(targets)
+	}
+
+	fn electing_voters(
+		bounds: DataProviderBounds,
+		remaining: PageIndex,
+	) -> data_provider::Result<
+		Vec<(AccountId, VoteWeight, BoundedVec<AccountId, Self::MaxVotesPerVoter>)>,
+	> {
+		let mut voters = Voters::get();
+
+		// jump to the first non-iterated, if this is a follow up.
+		if let Some(index) = LastIteratedVoterIndex::get() {
+			voters = voters.iter().skip(index).cloned().collect::<Vec<_>>();
+		}
+
+		// take as many as you can.
+		if let Some(max_len) = bounds.count.map(|c| c.0 as usize) {
+			voters.truncate(max_len)
+		}
+
+		if voters.is_empty() {
+			return Ok(vec![])
+		}
+
+		// if more pages will follow, remember where to resume; otherwise reset the cursor.
+		if remaining > 0 {
+			let last = voters.last().cloned().unwrap();
+			LastIteratedVoterIndex::set(Some(
+				Voters::get().iter().position(|v| v == &last).map(|i| i + 1).unwrap(),
+			));
+		} else {
+			LastIteratedVoterIndex::set(None)
+		}
+
+		Ok(voters)
+	}
+
+	fn desired_targets() -> data_provider::Result<u32> {
+		Ok(DesiredTargets::get())
+	}
+
+	fn next_election_prediction(now: u64) -> u64 {
+		// the next multiple of `EpochLength` after `now`.
+		now + EpochLength::get() - now % EpochLength::get()
+	}
+
+	#[cfg(feature = "runtime-benchmarks")]
+	fn put_snapshot(
+		voters: Vec<(AccountId, VoteWeight, BoundedVec<AccountId, MaxVotesPerVoter>)>,
+		targets: Vec<AccountId>,
+		_target_stake: Option<VoteWeight>,
+	) {
+		Targets::set(targets);
+		Voters::set(voters);
+	}
+
+	#[cfg(feature = "runtime-benchmarks")]
+	fn clear() {
+		Targets::set(vec![]);
+		Voters::set(vec![]);
+	}
+
+	#[cfg(feature = "runtime-benchmarks")]
+	fn add_voter(
+		voter: AccountId,
+		weight: VoteWeight,
+		targets: BoundedVec<AccountId, MaxVotesPerVoter>,
+	) {
+		let mut current = Voters::get();
+		current.push((voter, weight, targets));
+		Voters::set(current);
+	}
+
+	#[cfg(feature = "runtime-benchmarks")]
+	fn add_target(target: AccountId) {
+		use super::ExistentialDeposit;
+
+		let mut current = Targets::get();
+		current.push(target);
+		Targets::set(current);
+
+		// to be on-par with staking, we add a self vote as well. the stake is really not that
+		// important.
+		let mut current = Voters::get();
+		current.push((target, ExistentialDeposit::get() as u64, vec![target].try_into().unwrap()));
+		Voters::set(current);
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use crate::mock::{bound_by_count, ExtBuilder};
+
+	/// Targets are always served as one full page, and too-tight bounds are an error.
+	#[test]
+	fn targets() {
+		ExtBuilder::full().build_and_execute(|| {
+			assert_eq!(Targets::get().len(), 4);
+
+			// any non-zero page returns page zero.
+			assert_eq!(MockStaking::electable_targets(bound_by_count(None), 2).unwrap().len(), 4);
+			assert_eq!(MockStaking::electable_targets(bound_by_count(None), 1).unwrap().len(), 4);
+
+			// 0 is also fine.
+			assert_eq!(MockStaking::electable_targets(bound_by_count(None), 0).unwrap().len(), 4);
+
+			// fetch less targets is error, because targets cannot be sorted (both by MockStaking,
+			// and the real staking).
+			assert!(MockStaking::electable_targets(bound_by_count(Some(2)), 0).is_err());
+
+			// more targets is fine.
+			assert!(MockStaking::electable_targets(bound_by_count(Some(4)), 0).is_ok());
+			assert!(MockStaking::electable_targets(bound_by_count(Some(5)), 0).is_ok());
+		});
+	}
+
+	/// Voters are paginated via `LastIteratedVoterIndex`, which is cleared on the last page.
+	#[test]
+	fn multi_page_votes() {
+		ExtBuilder::full().build_and_execute(|| {
+			// unbounded fetch returns everyone and leaves no resume cursor.
+			assert_eq!(MockStaking::electing_voters(bound_by_count(None), 0).unwrap().len(), 12);
+			assert!(LastIteratedVoterIndex::get().is_none());
+
+			// page 0 (last page): no cursor is stored.
+			assert_eq!(
+				MockStaking::electing_voters(bound_by_count(Some(4)), 0)
+					.unwrap()
+					.into_iter()
+					.map(|(x, _, _)| x)
+					.collect::<Vec<_>>(),
+				vec![1, 2, 3, 4],
+			);
+			assert!(LastIteratedVoterIndex::get().is_none());
+
+			// page 2 (first page): cursor points past the last returned voter.
+			assert_eq!(
+				MockStaking::electing_voters(bound_by_count(Some(4)), 2)
+					.unwrap()
+					.into_iter()
+					.map(|(x, _, _)| x)
+					.collect::<Vec<_>>(),
+				vec![1, 2, 3, 4],
+			);
+			assert_eq!(LastIteratedVoterIndex::get().unwrap(), 4);
+
+			assert_eq!(
+				MockStaking::electing_voters(bound_by_count(Some(4)), 1)
+					.unwrap()
+					.into_iter()
+					.map(|(x, _, _)| x)
+					.collect::<Vec<_>>(),
+				vec![5, 6, 7, 8],
+			);
+			assert_eq!(LastIteratedVoterIndex::get().unwrap(), 8);
+
+			// final page: remaining voters are returned and the cursor is reset.
+			assert_eq!(
+				MockStaking::electing_voters(bound_by_count(Some(4)), 0)
+					.unwrap()
+					.into_iter()
+					.map(|(x, _, _)| x)
+					.collect::<Vec<_>>(),
+				vec![10, 20, 30, 40],
+			);
+			assert!(LastIteratedVoterIndex::get().is_none());
+		})
+	}
+}
diff --git a/substrate/frame/election-provider-multi-block/src/mock/weight_info.rs b/substrate/frame/election-provider-multi-block/src/mock/weight_info.rs
new file mode 100644
index 00000000000..a5f28f4fbd2
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/mock/weight_info.rs
@@ -0,0 +1,85 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// TODO: would love to ditch this, too big to handle here.
+
+use crate::{self as multi_block};
+use frame_support::weights::Weight;
+use sp_runtime::traits::Zero;
+
+frame_support::parameter_types! {
+	pub static MockWeightInfo: bool = false;
+}
+
+pub struct DualMockWeightInfo;
+impl multi_block::WeightInfo for DualMockWeightInfo {
+	fn on_initialize_nothing() -> Weight {
+		if MockWeightInfo::get() {
+			Zero::zero()
+		} else {
+			<() as multi_block::WeightInfo>::on_initialize_nothing()
+		}
+	}
+
+	fn on_initialize_into_snapshot_msp() -> Weight {
+		if MockWeightInfo::get() {
+			Zero::zero()
+		} else {
+			<() as multi_block::WeightInfo>::on_initialize_into_snapshot_msp()
+		}
+	}
+
+	fn on_initialize_into_snapshot_rest() -> Weight {
+		if MockWeightInfo::get() {
+			Zero::zero()
+		} else {
+			<() as multi_block::WeightInfo>::on_initialize_into_snapshot_rest()
+		}
+	}
+
+	fn on_initialize_into_signed() -> Weight {
+		if MockWeightInfo::get() {
+			Zero::zero()
+		} else {
+			<() as multi_block::WeightInfo>::on_initialize_into_signed()
+		}
+	}
+
+	fn on_initialize_into_signed_validation() -> Weight {
+		if MockWeightInfo::get() {
+			Zero::zero()
+		} else {
+			<() as multi_block::WeightInfo>::on_initialize_into_signed_validation()
+		}
+	}
+
+	fn on_initialize_into_unsigned() -> Weight {
+		if MockWeightInfo::get() {
+			Zero::zero()
+		} else {
+			<() as multi_block::WeightInfo>::on_initialize_into_unsigned()
+		}
+	}
+
+	fn manage() -> Weight {
+		if MockWeightInfo::get() {
+			Zero::zero()
+		} else {
+			<() as multi_block::WeightInfo>::manage()
+		}
+	}
+}
diff --git a/substrate/frame/election-provider-multi-block/src/signed/benchmarking.rs b/substrate/frame/election-provider-multi-block/src/signed/benchmarking.rs
new file mode 100644
index 00000000000..1e9facd72fb
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/signed/benchmarking.rs
@@ -0,0 +1,171 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::{
+	signed::{Config, Pallet, Submissions},
+	types::PagedRawSolution,
+	unsigned::miner::OffchainWorkerMiner,
+	CurrentPhase, Phase, Round,
+};
+use frame_benchmarking::v2::*;
+use frame_election_provider_support::ElectionDataProvider;
+use frame_support::pallet_prelude::*;
+use frame_system::RawOrigin;
+use sp_npos_elections::ElectionScore;
+use sp_std::boxed::Box;
+
+#[benchmarks(where T: crate::Config + crate::verifier::Config + crate::unsigned::Config)]
+mod benchmarks {
+	use super::*;
+
+	#[benchmark]
+	fn register_not_full() -> Result<(), BenchmarkError> {
+		CurrentPhase::<T>::put(Phase::Signed);
+		let round = Round::<T>::get();
+		let alice = crate::Pallet::<T>::funded_account("alice", 0);
+		let score = ElectionScore::default();
+
+		assert_eq!(Submissions::<T>::sorted_submitters(round).len(), 0);
+		#[block]
+		{
+			Pallet::<T>::register(RawOrigin::Signed(alice).into(), score)?;
+		}
+
+		assert_eq!(Submissions::<T>::sorted_submitters(round).len(), 1);
+		Ok(())
+	}
+
+	#[benchmark]
+	fn register_eject() -> Result<(), BenchmarkError> {
+		CurrentPhase::<T>::put(Phase::Signed);
+		let round = Round::<T>::get();
+
+		for i in 0..T::MaxSubmissions::get() {
+			let submitter = crate::Pallet::<T>::funded_account("submitter", i);
+			let score = ElectionScore { minimal_stake: i.into(), ..Default::default() };
+			Pallet::<T>::register(RawOrigin::Signed(submitter.clone()).into(), score)?;
+
+			// The first one, which will be ejected, has also submitted all pages
+			if i == 0 {
+				for p in 0..T::Pages::get() {
+					let page = Some(Default::default());
+					Pallet::<T>::submit_page(RawOrigin::Signed(submitter.clone()).into(), p, page)?;
+				}
+			}
+		}
+
+		let who = crate::Pallet::<T>::funded_account("who", 0);
+		let score =
+			ElectionScore { minimal_stake: T::MaxSubmissions::get().into(), ..Default::default() };
+
+		assert_eq!(
+			Submissions::<T>::sorted_submitters(round).len(),
+			T::MaxSubmissions::get() as usize
+		);
+
+		#[block]
+		{
+			Pallet::<T>::register(RawOrigin::Signed(who).into(), score)?;
+		}
+
+		assert_eq!(
+			Submissions::<T>::sorted_submitters(round).len(),
+			T::MaxSubmissions::get() as usize
+		);
+		Ok(())
+	}
+
+	#[benchmark]
+	fn submit_page() -> Result<(), BenchmarkError> {
+		T::DataProvider::set_next_election(crate::Pallet::<T>::reasonable_next_election());
+		crate::Pallet::<T>::roll_until_matches(|| {
+			matches!(CurrentPhase::<T>::get(), Phase::Signed)
+		});
+
+		// mine a full solution
+		let PagedRawSolution { score, solution_pages, .. } =
+			OffchainWorkerMiner::<T>::mine_solution(T::Pages::get(), false).unwrap();
+		let page = Some(Box::new(solution_pages[0].clone()));
+
+		// register alice
+		let alice = crate::Pallet::<T>::funded_account("alice", 0);
+		Pallet::<T>::register(RawOrigin::Signed(alice.clone()).into(), score)?;
+
+		#[block]
+		{
+			Pallet::<T>::submit_page(RawOrigin::Signed(alice).into(), 0, page)?;
+		}
+
+		Ok(())
+	}
+
+	#[benchmark]
+	fn unset_page() -> Result<(), BenchmarkError> {
+		T::DataProvider::set_next_election(crate::Pallet::<T>::reasonable_next_election());
+		crate::Pallet::<T>::roll_until_matches(|| {
+			matches!(CurrentPhase::<T>::get(), Phase::Signed)
+		});
+
+		// mine a full solution
+		let PagedRawSolution { score, solution_pages, .. } =
+			OffchainWorkerMiner::<T>::mine_solution(T::Pages::get(), false).unwrap();
+		let page = Some(Box::new(solution_pages[0].clone()));
+
+		// register alice
+		let alice = crate::Pallet::<T>::funded_account("alice", 0);
+		Pallet::<T>::register(RawOrigin::Signed(alice.clone()).into(), score)?;
+
+		// submit page
+		Pallet::<T>::submit_page(RawOrigin::Signed(alice.clone()).into(), 0, page)?;
+
+		#[block]
+		{
+			Pallet::<T>::submit_page(RawOrigin::Signed(alice).into(), 0, None)?;
+		}
+
+		Ok(())
+	}
+
+	#[benchmark]
+	fn bail() -> Result<(), BenchmarkError> {
+		CurrentPhase::<T>::put(Phase::Signed);
+		let alice = crate::Pallet::<T>::funded_account("alice", 0);
+
+		// register alice
+		let score = ElectionScore::default();
+		Pallet::<T>::register(RawOrigin::Signed(alice.clone()).into(), score)?;
+
+		// submit all pages
+		for p in 0..T::Pages::get() {
+			let page = Some(Default::default());
+			Pallet::<T>::submit_page(RawOrigin::Signed(alice.clone()).into(), p, page)?;
+		}
+
+		#[block]
+		{
+			Pallet::<T>::bail(RawOrigin::Signed(alice).into())?;
+		}
+
+		Ok(())
+	}
+
+	impl_benchmark_test_suite!(
+		Pallet,
+		crate::mock::ExtBuilder::signed().build_unchecked(),
+		crate::mock::Runtime
+	);
+}
diff --git a/substrate/frame/election-provider-multi-block/src/signed/mod.rs b/substrate/frame/election-provider-multi-block/src/signed/mod.rs
new file mode 100644
index 00000000000..1784a87b224
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/signed/mod.rs
@@ -0,0 +1,858 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! The signed phase of the multi-block election system.
+//!
+//! Signed submissions work on the basis of keeping a queue of submissions from random signed
+//! accounts, and sorting them based on the best claimed score to the worse.
+//!
+//! Once the time to evaluate the signed phase comes (`Phase::SignedValidation`), the solutions are
+//! checked from best-to-worse claim, and they end up in either of the 3 buckets:
+//!
+//! 1. If they are the first, correct solution (and consequently the best one, since we start
+//!    evaluating from the best claim), they are rewarded.
+//! 2. Any solution after the first correct solution is refunded in an unbiased way.
+//! 3. Any invalid solution that wasted valuable blockchain time gets slashed for their deposit.
+//!
+//! ## Future Plans:
+//!
+//! **Lazy deletion**:
+//! Overall, this pallet can avoid the need to delete any storage item, by:
+//! 1. outsource the storage of solution data to some other pallet.
+//! 2. keep it here, but make everything be also a map of the round number, so that we can keep old
+//!    storage, and it is ONLY EVER removed after that round number is over. This can happen
+//!    for more or less free by the submitter itself, and by anyone else as well, in which case they
+//!    get a share of the sum deposit. The share increases as time goes on.
+//! **Metadata update**: imagine you mis-computed your score.
+
+// TODO: we should delete this async and once the round is passed.
+// Registration would consequently be as follows:
+// - If you get ejected, and you are lazy removed, a percentage of your deposit is burned. If we set
+//   this to 100%, we will not have bad submissions after the queue is full. The queue can be made
+//   full by purely an attacker, in which case the sum of deposits should be large enough to cover
+//   the fact that we will have a bad election.
+// - whitelisted accounts who will not pay deposits are needed. They can still be ejected, but for
+//   free.
+// - Deposit should exponentially increase, and in general we should not allow for more than say 8
+//   signed submissions.
+
+use crate::{
+	types::SolutionOf,
+	verifier::{AsynchronousVerifier, SolutionDataProvider, Status, VerificationResult},
+};
+use codec::{Decode, Encode, MaxEncodedLen};
+use frame_election_provider_support::PageIndex;
+use frame_support::{
+	dispatch::DispatchResultWithPostInfo,
+	pallet_prelude::{StorageDoubleMap, ValueQuery, *},
+	traits::{
+		tokens::{
+			fungible::{Inspect, Mutate, MutateHold},
+			Fortitude, Precision,
+		},
+		Defensive, DefensiveSaturating, EstimateCallFee,
+	},
+	transactional, BoundedVec, Twox64Concat,
+};
+use frame_system::{ensure_signed, pallet_prelude::*};
+use scale_info::TypeInfo;
+use sp_io::MultiRemovalResults;
+use sp_npos_elections::ElectionScore;
+use sp_runtime::{traits::Saturating, Perbill};
+use sp_std::prelude::*;
+
+/// Explore all weights
+pub use crate::weights::measured::pallet_election_provider_multi_block_signed::*;
+/// Exports of this pallet
+pub use pallet::*;
+
+#[cfg(feature = "runtime-benchmarks")]
+mod benchmarking;
+
+pub(crate) type SignedWeightsOf<T> = <T as crate::signed::Config>::WeightInfo;
+
+#[cfg(test)]
+mod tests;
+
+type BalanceOf<T> =
+	<<T as Config>::Currency as Inspect<<T as frame_system::Config>::AccountId>>::Balance;
+
+/// All of the (meta) data around a signed submission
+#[derive(Encode, Decode, MaxEncodedLen, TypeInfo, Default, DebugNoBound)]
+#[cfg_attr(test, derive(frame_support::PartialEqNoBound, frame_support::EqNoBound))]
+#[codec(mel_bound(T: Config))]
+#[scale_info(skip_type_params(T))]
+pub struct SubmissionMetadata<T: Config> {
+	/// The amount of deposit that has been held in reserve.
+	deposit: BalanceOf<T>,
+	/// The amount of transaction fee that this submission has cost for its submitter so far.
+	fee: BalanceOf<T>,
+	/// The amount of rewards that we expect to give to this submission, if deemed worthy.
+	reward: BalanceOf<T>,
+	/// The score that this submission is claiming to achieve.
+	claimed_score: ElectionScore,
+	/// A bounded-bool-vec of pages that have been submitted so far.
+	pages: BoundedVec<bool, T::Pages>,
+}
+
+impl<T: Config> SolutionDataProvider for Pallet<T> {
+	type Solution = SolutionOf<T::MinerConfig>;
+
+	fn get_page(page: PageIndex) -> Option<Self::Solution> {
+		// note: a non-existing page will still be treated as merely an empty page. This could be
+		// re-considered.
+		let current_round = Self::current_round();
+		Submissions::<T>::leader(current_round).map(|(who, _score)| {
+			sublog!(info, "signed", "returning page {} of {:?}'s submission as leader.", page, who);
+			Submissions::<T>::get_page_of(current_round, &who, page).unwrap_or_default()
+		})
+	}
+
+	fn get_score() -> Option<ElectionScore> {
+		Submissions::<T>::leader(Self::current_round()).map(|(_who, score)| score)
+	}
+
+	fn report_result(result: crate::verifier::VerificationResult) {
+		// assumption of the trait.
+		debug_assert!(matches!(<T::Verifier as AsynchronousVerifier>::status(), Status::Nothing));
+		let current_round = Self::current_round();
+
+		match result {
+			VerificationResult::Queued => {
+				// defensive: if there is a result to be reported, then we must have had some
+				// leader.
+				if let Some((winner, metadata)) =
+					Submissions::<T>::take_leader_with_data(Self::current_round()).defensive()
+				{
+					// first, let's give them their reward.
+					let reward = metadata.reward.saturating_add(metadata.fee);
+					let _r = T::Currency::mint_into(&winner, reward);
+					debug_assert!(_r.is_ok());
+					Self::deposit_event(Event::<T>::Rewarded(
+						current_round,
+						winner.clone(),
+						reward,
+					));
+
+					// then, unreserve their deposit
+					let _res = T::Currency::release(
+						&HoldReason::SignedSubmission.into(),
+						&winner,
+						metadata.deposit,
+						Precision::BestEffort,
+					);
+					debug_assert!(_res.is_ok());
+
+					// note: we could wipe this data either over time, or via transactions.
+					while let Some((discarded, metadata)) =
+						Submissions::<T>::take_leader_with_data(Self::current_round())
+					{
+						let _res = T::Currency::release(
+							&HoldReason::SignedSubmission.into(),
+							&discarded,
+							metadata.deposit,
+							Precision::BestEffort,
+						);
+						debug_assert_eq!(_res, Ok(metadata.deposit));
+						Self::deposit_event(Event::<T>::Discarded(current_round, discarded));
+					}
+
+					// everything should have been clean.
+					#[cfg(debug_assertions)]
+					assert!(Submissions::<T>::ensure_killed(current_round).is_ok());
+				}
+			},
+			VerificationResult::Rejected => {
+				// defensive: if there is a result to be reported, then we must have had some
+				// leader.
+				if let Some((loser, metadata)) =
+					Submissions::<T>::take_leader_with_data(Self::current_round()).defensive()
+				{
+					// first, let's slash their deposit.
+					let slash = metadata.deposit;
+					let _res = T::Currency::burn_held(
+						&HoldReason::SignedSubmission.into(),
+						&loser,
+						slash,
+						Precision::BestEffort,
+						Fortitude::Force,
+					);
+					debug_assert_eq!(_res, Ok(slash));
+					Self::deposit_event(Event::<T>::Slashed(current_round, loser.clone(), slash));
+
+					// inform the verifier that they can now try again, if we're still in the signed
+					// validation phase.
+					if crate::Pallet::<T>::current_phase().is_signed_validation() &&
+						Submissions::<T>::has_leader(current_round)
+					{
+						// defensive: verifier just reported back a result, it must be in clear
+						// state.
+						let _ = <T::Verifier as AsynchronousVerifier>::start().defensive();
+					}
+				}
+			},
+			VerificationResult::DataUnavailable => {
+				unreachable!("TODO")
+			},
+		}
+	}
+}
+
+#[frame_support::pallet]
+pub mod pallet {
+	use super::{WeightInfo, *};
+
+	#[pallet::config]
+	#[pallet::disable_frame_system_supertrait_check]
+	pub trait Config: crate::Config {
+		/// The overarching event type.
+		type RuntimeEvent: From<Event<Self>>
+			+ IsType<<Self as frame_system::Config>::RuntimeEvent>
+			+ TryInto<Event<Self>>;
+
+		/// Handler to the currency.
+		type Currency: Inspect<Self::AccountId>
+			+ Mutate<Self::AccountId>
+			+ MutateHold<Self::AccountId, Reason = Self::RuntimeHoldReason>;
+
+		/// Base deposit amount for a submission.
+		type DepositBase: Get<BalanceOf<Self>>;
+
+		/// Extra deposit per-page.
+		type DepositPerPage: Get<BalanceOf<Self>>;
+
+		/// Base reward that is given to the winner.
+		type RewardBase: Get<BalanceOf<Self>>;
+
+		/// Maximum number of submissions. This, combined with `SignedValidationPhase` and `Pages`
+		/// dictates how many signed solutions we can verify.
+		type MaxSubmissions: Get<u32>;
+
+		/// The ratio of the deposit to return in case a signed account submits a solution via
+		/// [`Pallet::register`], but later calls [`Pallet::bail`].
+		///
+		/// This should be large enough to cover the deletion cost of possibly all pages. To be
+		/// safe, you can put it to 100% to begin with to fully dis-incentivize bailing.
+		type BailoutGraceRatio: Get<Perbill>;
+
+		/// Handler to estimate the fee of a call. Useful to refund the transaction fee of the
+		/// submitter for the winner.
+		type EstimateCallFee: EstimateCallFee<Call<Self>, BalanceOf<Self>>;
+
+		/// Overarching hold reason.
+		type RuntimeHoldReason: From<HoldReason>;
+
+		/// Provided weights of this pallet.
+		type WeightInfo: WeightInfo;
+	}
+
+	/// The hold reason of this pallet.
+	#[pallet::composite_enum]
+	pub enum HoldReason {
+		/// Because of submitting a signed solution.
+		#[codec(index = 0)]
+		SignedSubmission,
+	}
+
+	/// Wrapper type for signed submissions.
+	///
+	/// It handles 3 storage items:
+	///
+	/// 1. [`SortedScores`]: A flat vector of all submissions' `(submitter_id, claimed_score)`.
+	/// 2. [`SubmissionStorage`]: Paginated map of all submissions, keyed by submitter and page.
+	/// 3. [`SubmissionMetadataStorage`]: Map from submitter to the metadata of their submission.
+	///
+	/// All storage items in this group are mapped, and their first key is the `round` to which they
+	/// belong to. In essence, we are storing multiple versions of each group.
+	///
+	/// ### Invariants:
+	///
+	/// This storage group is sane, clean, and consistent if the following invariants are held:
+	///
+	/// Among the submissions of each round:
+	/// - `SortedScores` should never contain duplicate account ids.
+	/// - For any account id in `SortedScores`, a corresponding value should exist in
+	/// `SubmissionMetadataStorage` under that account id's key.
+	///       - And the value of `metadata.score` must be equal to the score stored in
+	///         `SortedScores`.
+	/// - And vice versa: for any key existing in `SubmissionMetadataStorage`, an item must exist in
+	///   `SortedScores`.
+	/// - For any first key existing in `SubmissionStorage`, a key must exist in
+	///   `SubmissionMetadataStorage`.
+	/// - For any first key in `SubmissionStorage`, the number of second keys existing should be the
+	///   same as the `true` count of `pages` in [`SubmissionMetadata`] (this already implies the
+	///   former, since it uses the metadata).
+	///
+	/// All mutating functions are only allowed to transition into states where all of the above
+	/// conditions are met.
+	///
+	/// No particular invariant exists between data that related to different rounds. They are
+	/// purely independent.
+	pub(crate) struct Submissions<T: Config>(sp_std::marker::PhantomData<T>);
+
+	#[pallet::storage]
+	type SortedScores<T: Config> = StorageMap<
+		_,
+		Twox64Concat,
+		u32,
+		BoundedVec<(T::AccountId, ElectionScore), T::MaxSubmissions>,
+		ValueQuery,
+	>;
+
+	/// Triple map from (round, account, page) to a solution page.
+	#[pallet::storage]
+	type SubmissionStorage<T: Config> = StorageNMap<
+		_,
+		(
+			NMapKey<Twox64Concat, u32>,
+			NMapKey<Twox64Concat, T::AccountId>,
+			NMapKey<Twox64Concat, PageIndex>,
+		),
+		SolutionOf<T::MinerConfig>,
+		OptionQuery,
+	>;
+
+	/// Map from account to the metadata of their submission.
+	///
+	/// invariant: for any Key1 of type `AccountId` in [`Submissions`], this storage map also has a
+	/// value.
+	#[pallet::storage]
+	type SubmissionMetadataStorage<T: Config> =
+		StorageDoubleMap<_, Twox64Concat, u32, Twox64Concat, T::AccountId, SubmissionMetadata<T>>;
+
+	impl<T: Config> Submissions<T> {
+		// -- mutating functions
+
+		/// Generic checked mutation helper.
+		///
+		/// All mutating functions must be funneled through this helper. The round at which the
+		/// mutation happens must be provided
+		fn mutate_checked<R, F: FnOnce() -> R>(_round: u32, mutate: F) -> R {
+			let result = mutate();
+
+			#[cfg(debug_assertions)]
+			{
+				assert!(Self::sanity_check_round(_round).is_ok());
+				assert!(Self::sanity_check_round(_round + 1).is_ok());
+				assert!(Self::sanity_check_round(_round.saturating_sub(1)).is_ok());
+			}
+
+			result
+		}
+
+		/// *Fully* **TAKE** (i.e. get and remove) the leader from storage, with all of its
+		/// associated data.
+		///
+		/// This removes all associated data of the leader from storage, discarding the submission
+		/// data and score, returning the rest.
+		pub(crate) fn take_leader_with_data(
+			round: u32,
+		) -> Option<(T::AccountId, SubmissionMetadata<T>)> {
+			Self::mutate_checked(round, || {
+				SortedScores::<T>::mutate(round, |sorted| sorted.pop()).and_then(
+					|(submitter, _score)| {
+						// NOTE: safe to remove unbounded, as at most `Pages` pages are stored.
+						let r: MultiRemovalResults = SubmissionStorage::<T>::clear_prefix(
+							(round, &submitter),
+							u32::MAX,
+							None,
+						);
+						debug_assert!(r.unique <= T::Pages::get());
+
+						SubmissionMetadataStorage::<T>::take(round, &submitter)
+							.map(|metadata| (submitter, metadata))
+					},
+				)
+			})
+		}
+
+		/// *Fully* **TAKE** (i.e. get and remove) a submission from storage, with all of its
+		/// associated data.
+		///
+		/// This removes all associated data of the submitter from storage, discarding the
+		/// submission data and score, returning the metadata.
+		pub(crate) fn take_submission_with_data(
+			round: u32,
+			who: &T::AccountId,
+		) -> Option<SubmissionMetadata<T>> {
+			Self::mutate_checked(round, || {
+				SortedScores::<T>::mutate(round, |sorted_scores| {
+					if let Some(index) = sorted_scores.iter().position(|(x, _)| x == who) {
+						sorted_scores.remove(index);
+					}
+				});
+				// Note: safe to remove unbounded, as at most `Pages` pages are stored.
+				let r = SubmissionStorage::<T>::clear_prefix((round, who), u32::MAX, None);
+				debug_assert!(r.unique <= T::Pages::get());
+
+				SubmissionMetadataStorage::<T>::take(round, who)
+			})
+		}
+
+		/// Try and register a new solution.
+		///
+		/// Registration can only happen for the current round.
+		///
+		/// Registration might fail if the queue is already full, and the solution is not good
+		/// enough to eject the weakest.
+		fn try_register(
+			round: u32,
+			who: &T::AccountId,
+			metadata: SubmissionMetadata<T>,
+		) -> Result<bool, DispatchError> {
+			Self::mutate_checked(round, || Self::try_register_inner(round, who, metadata))
+		}
+
+		fn try_register_inner(
+			round: u32,
+			who: &T::AccountId,
+			metadata: SubmissionMetadata<T>,
+		) -> Result<bool, DispatchError> {
+			let mut sorted_scores = SortedScores::<T>::get(round);
+
+			let discarded = if let Some(_) = sorted_scores.iter().position(|(x, _)| x == who) {
+				return Err(Error::<T>::Duplicate.into());
+			} else {
+				// must be new.
+				debug_assert!(!SubmissionMetadataStorage::<T>::contains_key(round, who));
+
+				let pos = match sorted_scores
+					.binary_search_by_key(&metadata.claimed_score, |(_, y)| *y)
+				{
+					// an equal score exists, unlikely, but could very well happen. We just put them
+					// next to each other.
+					Ok(pos) => pos,
+					// new score, should be inserted in this pos.
+					Err(pos) => pos,
+				};
+
+				let record = (who.clone(), metadata.claimed_score);
+				match sorted_scores.force_insert_keep_right(pos, record) {
+					Ok(None) => false,
+					Ok(Some((discarded, _score))) => {
+						let metadata = SubmissionMetadataStorage::<T>::take(round, &discarded);
+						// Note: safe to remove unbounded, as at most `Pages` pages are stored.
+						let _r = SubmissionStorage::<T>::clear_prefix(
+							(round, &discarded),
+							u32::MAX,
+							None,
+						);
+						debug_assert!(_r.unique <= T::Pages::get());
+						let to_refund = metadata.map(|m| m.deposit).defensive_unwrap_or_default();
+						let _released = T::Currency::release(
+							&HoldReason::SignedSubmission.into(),
+							&discarded,
+							to_refund,
+							Precision::BestEffort,
+						)?;
+						debug_assert_eq!(_released, to_refund);
+						Pallet::<T>::deposit_event(Event::<T>::Discarded(round, discarded));
+						true
+					},
+					Err(_) => return Err(Error::<T>::QueueFull.into()),
+				}
+			};
+
+			SortedScores::<T>::insert(round, sorted_scores);
+			SubmissionMetadataStorage::<T>::insert(round, who, metadata);
+			Ok(discarded)
+		}
+
+		/// Submit a page of `solution` to the `page` index of `who`'s submission.
+		///
+		/// Updates the deposit in the metadata accordingly.
+		///
+		/// - If `maybe_solution` is `None`, then the given page is deleted.
+		/// - `who` must have already registered their submission.
+		/// - If the page is a duplicate, it will be replaced.
+		pub(crate) fn try_mutate_page(
+			round: u32,
+			who: &T::AccountId,
+			page: PageIndex,
+			maybe_solution: Option<Box<SolutionOf<T::MinerConfig>>>,
+		) -> DispatchResultWithPostInfo {
+			Self::mutate_checked(round, || {
+				Self::try_mutate_page_inner(round, who, page, maybe_solution)
+			})
+		}
+
+		fn try_mutate_page_inner(
+			round: u32,
+			who: &T::AccountId,
+			page: PageIndex,
+			maybe_solution: Option<Box<SolutionOf<T::MinerConfig>>>,
+		) -> DispatchResultWithPostInfo {
+			let mut metadata =
+				SubmissionMetadataStorage::<T>::get(round, who).ok_or(Error::<T>::NotRegistered)?;
+			ensure!(page < T::Pages::get(), Error::<T>::BadPageIndex);
+
+			// defensive only: we resize `meta.pages` to be `T::Pages` elements once, and never
+			// resize it again; `page` is checked here to be in bound; element must exist; qed.
+			if let Some(page_bit) = metadata.pages.get_mut(page as usize).defensive() {
+				*page_bit = maybe_solution.is_some();
+			}
+
+			// update deposit.
+			let new_pages: BalanceOf<T> =
+				(metadata.pages.iter().filter(|x| **x).count() as u32).into();
+			let new_deposit = T::DepositBase::get() + T::DepositPerPage::get() * new_pages;
+			let old_deposit = metadata.deposit;
+			if new_deposit > old_deposit {
+				let to_reserve = new_deposit - old_deposit;
+				T::Currency::hold(&HoldReason::SignedSubmission.into(), who, to_reserve)?;
+			} else {
+				let to_unreserve = old_deposit - new_deposit;
+				let _res = T::Currency::release(
+					&HoldReason::SignedSubmission.into(),
+					who,
+					to_unreserve,
+					Precision::BestEffort,
+				);
+				debug_assert_eq!(_res, Ok(to_unreserve));
+			};
+			metadata.deposit = new_deposit;
+
+			// If a page is being added, we record the fee as well. For removals, we ignore the fee
+			// as it is negligible, and we don't want to encourage anyone to submit and remove
+			// anyways. Note that fee is only refunded for the winner anyways.
+			if maybe_solution.is_some() {
+				let fee = T::EstimateCallFee::estimate_call_fee(
+					&Call::submit_page { page, maybe_solution: maybe_solution.clone() },
+					None.into(),
+				);
+				metadata.fee.saturating_accrue(fee);
+			}
+
+			SubmissionStorage::<T>::mutate_exists((round, who, page), |maybe_old_solution| {
+				*maybe_old_solution = maybe_solution.map(|s| *s)
+			});
+			SubmissionMetadataStorage::<T>::insert(round, who, metadata);
+			Ok(().into())
+		}
+
+		// -- getter functions
+		pub(crate) fn has_leader(round: u32) -> bool {
+			!SortedScores::<T>::get(round).is_empty()
+		}
+
+		pub(crate) fn leader(round: u32) -> Option<(T::AccountId, ElectionScore)> {
+			SortedScores::<T>::get(round).last().cloned()
+		}
+
+		pub(crate) fn get_page_of(
+			round: u32,
+			who: &T::AccountId,
+			page: PageIndex,
+		) -> Option<SolutionOf<T::MinerConfig>> {
+			SubmissionStorage::<T>::get((round, who, &page))
+		}
+	}
+
+	#[allow(unused)]
+	#[cfg(any(feature = "try-runtime", test, feature = "runtime-benchmarks", debug_assertions))]
+	impl<T: Config> Submissions<T> {
+		pub(crate) fn sorted_submitters(round: u32) -> BoundedVec<T::AccountId, T::MaxSubmissions> {
+			use frame_support::traits::TryCollect;
+			SortedScores::<T>::get(round).into_iter().map(|(x, _)| x).try_collect().unwrap()
+		}
+
+		pub fn submissions_iter(
+			round: u32,
+		) -> impl Iterator<Item = (T::AccountId, PageIndex, SolutionOf<T::MinerConfig>)> {
+			SubmissionStorage::<T>::iter_prefix((round,)).map(|((x, y), z)| (x, y, z))
+		}
+
+		pub fn metadata_iter(
+			round: u32,
+		) -> impl Iterator<Item = (T::AccountId, SubmissionMetadata<T>)> {
+			SubmissionMetadataStorage::<T>::iter_prefix(round)
+		}
+
+		pub fn metadata_of(round: u32, who: T::AccountId) -> Option<SubmissionMetadata<T>> {
+			SubmissionMetadataStorage::<T>::get(round, who)
+		}
+
+		pub fn pages_of(
+			round: u32,
+			who: T::AccountId,
+		) -> impl Iterator<Item = (PageIndex, SolutionOf<T::MinerConfig>)> {
+			SubmissionStorage::<T>::iter_prefix((round, who))
+		}
+
+		pub fn leaderboard(
+			round: u32,
+		) -> BoundedVec<(T::AccountId, ElectionScore), T::MaxSubmissions> {
+			SortedScores::<T>::get(round)
+		}
+
+		/// Ensure that all the storage items associated with the given round are in `killed` state,
+		/// meaning the expected state after an election is over.
+		pub(crate) fn ensure_killed(round: u32) -> DispatchResult {
+			ensure!(Self::metadata_iter(round).count() == 0, "metadata_iter not cleared.");
+			ensure!(Self::submissions_iter(round).count() == 0, "submissions_iter not cleared.");
+			ensure!(Self::sorted_submitters(round).len() == 0, "sorted_submitters not cleared.");
+
+			Ok(())
+		}
+
+		/// Perform all the sanity checks of this storage item group at the given round.
+		///
+		/// Ensures that:
+		/// - the sorted score list contains no duplicate submitters,
+		/// - every metadata entry has exactly one matching sorted-score entry with the same
+		///   claimed score,
+		/// - every stored page belongs to a submitter that has metadata, and
+		/// - the stored page count of each submitter matches their metadata `pages` bitmap.
+		pub(crate) fn sanity_check_round(round: u32) -> DispatchResult {
+			use sp_std::collections::btree_set::BTreeSet;
+			let sorted_scores = SortedScores::<T>::get(round);
+			// no duplicate submitters: collecting into a set must not lose any element. Note
+			// `ensure!` rather than `assert_eq!`, so that a failed check surfaces as an `Err`
+			// (consistent with this function's return type) instead of panicking.
+			ensure!(
+				sorted_scores.iter().map(|(x, _)| x).collect::<BTreeSet<_>>().len() ==
+					sorted_scores.len(),
+				"duplicate submitter in sorted scores"
+			);
+
+			// every metadata entry must have exactly one sorted-score entry, with a matching
+			// claimed score.
+			let _ = SubmissionMetadataStorage::<T>::iter_prefix(round)
+				.map(|(submitter, meta)| {
+					let mut matches = SortedScores::<T>::get(round)
+						.into_iter()
+						.filter(|(who, _score)| who == &submitter)
+						.collect::<Vec<_>>();
+
+					ensure!(
+						matches.len() == 1,
+						"item existing in metadata but missing in sorted list.",
+					);
+
+					let (_, score) = matches.pop().expect("checked; qed");
+					ensure!(score == meta.claimed_score, "score mismatch");
+					Ok(())
+				})
+				.collect::<Result<Vec<_>, &'static str>>()?;
+
+			// every stored page must belong to a submitter with metadata.
+			ensure!(
+				SubmissionStorage::<T>::iter_key_prefix((round,)).map(|(k1, _k2)| k1).all(
+					|submitter| SubmissionMetadataStorage::<T>::contains_key(round, submitter)
+				),
+				"missing metadata of submitter"
+			);
+
+			// the stored page count of each submitter must match their metadata `pages` bitmap.
+			for submitter in SubmissionStorage::<T>::iter_key_prefix((round,)).map(|(k1, _k2)| k1) {
+				let pages_count =
+					SubmissionStorage::<T>::iter_key_prefix((round, &submitter)).count();
+				let metadata = SubmissionMetadataStorage::<T>::get(round, submitter)
+					.expect("metadata checked to exist for all keys; qed");
+				let assumed_pages_count = metadata.pages.iter().filter(|x| **x).count();
+				ensure!(pages_count == assumed_pages_count, "wrong page count");
+			}
+
+			Ok(())
+		}
+	}
+
+	#[pallet::pallet]
+	/// The pallet struct of the signed-submission sub-pallet.
+	pub struct Pallet<T>(PhantomData<T>);
+
+	#[pallet::event]
+	#[pallet::generate_deposit(pub(super) fn deposit_event)]
+	pub enum Event<T: Config> {
+		/// Upcoming submission has been registered for the given account, with the given score.
+		///
+		/// The first `u32` of every variant of this event is the round number.
+		Registered(u32, T::AccountId, ElectionScore),
+		/// A page of solution with the given index has been stored for the given account.
+		Stored(u32, T::AccountId, PageIndex),
+		/// The given account has been rewarded with the given amount.
+		Rewarded(u32, T::AccountId, BalanceOf<T>),
+		/// The given account has been slashed with the given amount.
+		Slashed(u32, T::AccountId, BalanceOf<T>),
+		/// The given account has been discarded.
+		Discarded(u32, T::AccountId),
+		/// The given account has bailed.
+		Bailed(u32, T::AccountId),
+	}
+
+	#[pallet::error]
+	pub enum Error<T> {
+		/// The phase is not signed; no signed operation is allowed at this time.
+		PhaseNotSigned,
+		/// The submission is a duplicate: the account has already registered in this round.
+		Duplicate,
+		/// The queue is full, and the submission was not good enough to eject an existing one.
+		QueueFull,
+		/// The page index is out of bounds.
+		BadPageIndex,
+		/// The account is not registered; pages can only be submitted after `register`.
+		NotRegistered,
+		/// No submission found for the account in this round.
+		NoSubmission,
+	}
+
+	#[pallet::call]
+	impl<T: Config> Pallet<T> {
+		/// Register oneself for an upcoming signed election.
+		///
+		/// Holds [`Config::DepositBase`] from the submitter, records the estimated fee of this
+		/// call, and inserts the submitter into the sorted leaderboard of the current round.
+		#[pallet::weight(SignedWeightsOf::<T>::register_eject())]
+		#[pallet::call_index(0)]
+		pub fn register(
+			origin: OriginFor<T>,
+			claimed_score: ElectionScore,
+		) -> DispatchResultWithPostInfo {
+			let who = ensure_signed(origin)?;
+			ensure!(crate::Pallet::<T>::current_phase().is_signed(), Error::<T>::PhaseNotSigned);
+
+			// note: we could already check if this is a duplicate here, but prefer keeping the code
+			// simple for now.
+
+			let deposit = T::DepositBase::get();
+			let reward = T::RewardBase::get();
+			let fee = T::EstimateCallFee::estimate_call_fee(
+				&Call::register { claimed_score },
+				None.into(),
+			);
+			// start with an all-`false` page bitmap: no page has been submitted yet.
+			let mut pages = BoundedVec::<_, _>::with_bounded_capacity(T::Pages::get() as usize);
+			pages.bounded_resize(T::Pages::get() as usize, false);
+
+			let new_metadata = SubmissionMetadata { claimed_score, deposit, reward, fee, pages };
+
+			T::Currency::hold(&HoldReason::SignedSubmission.into(), &who, deposit)?;
+			let round = Self::current_round();
+			let discarded = Submissions::<T>::try_register(round, &who, new_metadata)?;
+			Self::deposit_event(Event::<T>::Registered(round, who, claimed_score));
+
+			// maybe refund: if no existing submission was ejected, the cheaper
+			// `register_not_full` weight applies.
+			if discarded {
+				Ok(().into())
+			} else {
+				Ok(Some(SignedWeightsOf::<T>::register_not_full()).into())
+			}
+		}
+
+		/// Submit a single page of a solution.
+		///
+		/// Must always come after [`Pallet::register`].
+		///
+		/// `maybe_solution` can be set to `None` to erase the page.
+		///
+		/// Collects deposits from the signed origin based on [`Config::DepositBase`] and
+		/// [`Config::DepositPerPage`].
+		#[pallet::weight(SignedWeightsOf::<T>::submit_page())]
+		#[pallet::call_index(1)]
+		pub fn submit_page(
+			origin: OriginFor<T>,
+			page: PageIndex,
+			maybe_solution: Option<Box<SolutionOf<T::MinerConfig>>>,
+		) -> DispatchResultWithPostInfo {
+			let who = ensure_signed(origin)?;
+			ensure!(crate::Pallet::<T>::current_phase().is_signed(), Error::<T>::PhaseNotSigned);
+			let is_set = maybe_solution.is_some();
+
+			let round = Self::current_round();
+			Submissions::<T>::try_mutate_page(round, &who, page, maybe_solution)?;
+			Self::deposit_event(Event::<T>::Stored(round, who, page));
+
+			// maybe refund: erasing a page (`None`) is cheaper than storing one.
+			if is_set {
+				Ok(().into())
+			} else {
+				Ok(Some(SignedWeightsOf::<T>::unset_page()).into())
+			}
+		}
+
+		/// Retract a submission.
+		///
+		/// A portion of the deposit may be returned, based on the [`Config::BailoutGraceRatio`].
+		///
+		/// This will fully remove the solution from storage.
+		#[pallet::weight(SignedWeightsOf::<T>::bail())]
+		#[pallet::call_index(2)]
+		#[transactional]
+		pub fn bail(origin: OriginFor<T>) -> DispatchResultWithPostInfo {
+			let who = ensure_signed(origin)?;
+			ensure!(crate::Pallet::<T>::current_phase().is_signed(), Error::<T>::PhaseNotSigned);
+			let round = Self::current_round();
+			let metadata = Submissions::<T>::take_submission_with_data(round, &who)
+				.ok_or(Error::<T>::NoSubmission)?;
+
+			// the grace ratio of the held deposit is given back; the remainder is burned.
+			let deposit = metadata.deposit;
+			let to_refund = T::BailoutGraceRatio::get() * deposit;
+			let to_slash = deposit.defensive_saturating_sub(to_refund);
+
+			// release the refundable portion of the hold back to the submitter.
+			let _res = T::Currency::release(
+				&HoldReason::SignedSubmission.into(),
+				&who,
+				to_refund,
+				Precision::BestEffort,
+			)
+			.defensive();
+			debug_assert_eq!(_res, Ok(to_refund));
+
+			// burn the rest of the held deposit as the cost of bailing out.
+			let _res = T::Currency::burn_held(
+				&HoldReason::SignedSubmission.into(),
+				&who,
+				to_slash,
+				Precision::BestEffort,
+				Fortitude::Force,
+			)
+			.defensive();
+			debug_assert_eq!(_res, Ok(to_slash));
+
+			Self::deposit_event(Event::<T>::Bailed(round, who));
+
+			Ok(None.into())
+		}
+	}
+
+	#[pallet::hooks]
+	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
+		fn on_initialize(now: BlockNumberFor<T>) -> Weight {
+			// this code is only called when at the boundary of phase transition, which is already
+			// captured by the parent pallet. No need for weight.
+			let weight_taken_into_account: Weight = Default::default();
+
+			// entering signed validation: ask the verifier to start verifying the current best
+			// submission, if one exists.
+			if crate::Pallet::<T>::current_phase().is_signed_validation_open_at(now) {
+				let maybe_leader = Submissions::<T>::leader(Self::current_round());
+				sublog!(
+					info,
+					"signed",
+					"signed validation started, sending validation start signal? {:?}",
+					maybe_leader.is_some()
+				);
+
+				// start an attempt to verify our best thing.
+				if maybe_leader.is_some() {
+					// defensive: signed phase has just begun, verifier should be in a clear state
+					// and ready to accept a solution.
+					let _ = <T::Verifier as AsynchronousVerifier>::start().defensive();
+				}
+			}
+
+			// entering the unsigned phase: signed validation just ended, so stop any ongoing
+			// verification.
+			if crate::Pallet::<T>::current_phase().is_unsigned_open_at(now) {
+				// signed validation phase just ended, make sure you stop any ongoing operation.
+				sublog!(info, "signed", "signed validation ended, sending validation stop signal",);
+				<T::Verifier as AsynchronousVerifier>::stop();
+			}
+
+			weight_taken_into_account
+		}
+
+		#[cfg(feature = "try-runtime")]
+		fn try_state(n: BlockNumberFor<T>) -> Result<(), sp_runtime::TryRuntimeError> {
+			Self::do_try_state(n)
+		}
+	}
+}
+
+impl<T: Config> Pallet<T> {
+	/// Run the storage invariant checks of [`Submissions`] for the current round.
+	#[cfg(any(feature = "try-runtime", test, feature = "runtime-benchmarks"))]
+	pub(crate) fn do_try_state(_n: BlockNumberFor<T>) -> Result<(), sp_runtime::TryRuntimeError> {
+		Submissions::<T>::sanity_check_round(Self::current_round())
+	}
+
+	/// The current round, as tracked by the parent pallet.
+	fn current_round() -> u32 {
+		crate::Pallet::<T>::round()
+	}
+}
diff --git a/substrate/frame/election-provider-multi-block/src/signed/tests.rs b/substrate/frame/election-provider-multi-block/src/signed/tests.rs
new file mode 100644
index 00000000000..7d0b1652c1e
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/signed/tests.rs
@@ -0,0 +1,554 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::{Event as SignedEvent, *};
+use crate::{mock::*, verifier::FeasibilityError};
+use sp_core::bounded_vec;
+
+pub type T = Runtime;
+
+mod calls {
+	use super::*;
+	use crate::Phase;
+	use sp_runtime::{DispatchError, TokenError::FundsUnavailable};
+
+	#[test]
+	fn cannot_register_with_insufficient_balance() {
+		// registration must hold the deposit; accounts that cannot afford it are rejected.
+		ExtBuilder::signed().build_and_execute(|| {
+			roll_to_signed_open();
+			// 777 is not funded.
+			assert_noop!(
+				SignedPallet::register(RuntimeOrigin::signed(777), Default::default()),
+				DispatchError::Token(FundsUnavailable)
+			);
+		});
+
+		ExtBuilder::signed().build_and_execute(|| {
+			roll_to_signed_open();
+			// 99 is funded but deposit is too high.
+			assert_eq!(balances(99), (100, 0));
+			SignedDepositBase::set(101);
+			assert_noop!(
+				SignedPallet::register(RuntimeOrigin::signed(99), Default::default()),
+				DispatchError::Token(FundsUnavailable)
+			);
+		})
+	}
+
+	#[test]
+	fn cannot_register_if_not_signed() {
+		// outside of the signed phase, registration is rejected.
+		ExtBuilder::signed().build_and_execute(|| {
+			assert!(crate::Pallet::<T>::current_phase() != Phase::Signed);
+			assert_noop!(
+				SignedPallet::register(RuntimeOrigin::signed(99), Default::default()),
+				Error::<T>::PhaseNotSigned
+			);
+		})
+	}
+
+	#[test]
+	fn register_metadata_works() {
+		// registering stores metadata and inserts into the sorted leaderboard of the round.
+		ExtBuilder::signed().build_and_execute(|| {
+			roll_to_signed_open();
+			assert_full_snapshot();
+
+			assert_eq!(balances(99), (100, 0));
+			let score = ElectionScore { minimal_stake: 100, ..Default::default() };
+
+			assert_ok!(SignedPallet::register(RuntimeOrigin::signed(99), score));
+			// the base deposit of 5 is held.
+			assert_eq!(balances(99), (95, 5));
+
+			assert_eq!(Submissions::<Runtime>::metadata_iter(1).count(), 0);
+			assert_eq!(Submissions::<Runtime>::metadata_iter(0).count(), 1);
+			assert_eq!(
+				Submissions::<Runtime>::metadata_of(0, 99).unwrap(),
+				SubmissionMetadata {
+					claimed_score: score,
+					deposit: 5,
+					fee: 1,
+					pages: bounded_vec![false, false, false],
+					reward: 3
+				}
+			);
+			assert_eq!(
+				*Submissions::<Runtime>::leaderboard(0),
+				vec![(99, ElectionScore { minimal_stake: 100, ..Default::default() })]
+			);
+			assert!(matches!(signed_events().as_slice(), &[
+					SignedEvent::Registered(_, x, _),
+				] if x == 99));
+
+			// second one submits
+			assert_eq!(balances(999), (100, 0));
+			let score = ElectionScore { minimal_stake: 90, ..Default::default() };
+			assert_ok!(SignedPallet::register(RuntimeOrigin::signed(999), score));
+			assert_eq!(balances(999), (95, 5));
+
+			assert_eq!(
+				Submissions::<Runtime>::metadata_of(0, 999).unwrap(),
+				SubmissionMetadata {
+					claimed_score: score,
+					deposit: 5,
+					fee: 1,
+					pages: bounded_vec![false, false, false],
+					reward: 3
+				}
+			);
+			assert!(matches!(signed_events().as_slice(), &[
+					SignedEvent::Registered(..),
+					SignedEvent::Registered(_, x, _),
+				] if x == 999));
+
+			// leaderboard is sorted ascending by score: weaker 999 comes first.
+			assert_eq!(
+				*Submissions::<Runtime>::leaderboard(0),
+				vec![
+					(999, ElectionScore { minimal_stake: 90, ..Default::default() }),
+					(99, ElectionScore { minimal_stake: 100, ..Default::default() })
+				]
+			);
+			assert_eq!(Submissions::<Runtime>::metadata_iter(1).count(), 0);
+			assert_eq!(Submissions::<Runtime>::metadata_iter(0).count(), 2);
+
+			// re-registering with a new score is rejected as a duplicate.
+			assert_noop!(
+				SignedPallet::register(
+					RuntimeOrigin::signed(999),
+					ElectionScore { minimal_stake: 80, ..Default::default() }
+				),
+				Error::<T>::Duplicate,
+			);
+		})
+	}
+
+	#[test]
+	fn page_submission_accumulates_fee() {
+		// each page submission adds to the recorded fee and deposit; erasing a page refunds
+		// the per-page deposit but the fee stays.
+		ExtBuilder::signed().build_and_execute(|| {
+			roll_to_signed_open();
+			assert_full_snapshot();
+
+			let score = ElectionScore { minimal_stake: 100, ..Default::default() };
+			assert_ok!(SignedPallet::register(RuntimeOrigin::signed(99), score));
+
+			// fee for register is recorded.
+			assert_eq!(
+				Submissions::<Runtime>::metadata_of(0, 99).unwrap(),
+				SubmissionMetadata {
+					claimed_score: score,
+					deposit: 5,
+					fee: 1,
+					pages: bounded_vec![false, false, false],
+					reward: 3
+				}
+			);
+
+			// fee for page submission is recorded.
+			assert_ok!(SignedPallet::submit_page(
+				RuntimeOrigin::signed(99),
+				0,
+				Some(Default::default())
+			));
+			assert_eq!(
+				Submissions::<Runtime>::metadata_of(0, 99).unwrap(),
+				SubmissionMetadata {
+					claimed_score: score,
+					deposit: 6,
+					fee: 2,
+					pages: bounded_vec![true, false, false],
+					reward: 3
+				}
+			);
+
+			// another fee for page submission is recorded.
+			assert_ok!(SignedPallet::submit_page(
+				RuntimeOrigin::signed(99),
+				1,
+				Some(Default::default())
+			));
+			assert_eq!(
+				Submissions::<Runtime>::metadata_of(0, 99).unwrap(),
+				SubmissionMetadata {
+					claimed_score: score,
+					deposit: 7,
+					fee: 3,
+					pages: bounded_vec![true, true, false],
+					reward: 3
+				}
+			);
+
+			// removal updates deposit but not the fee
+			assert_ok!(SignedPallet::submit_page(RuntimeOrigin::signed(99), 1, None));
+
+			assert_eq!(
+				Submissions::<Runtime>::metadata_of(0, 99).unwrap(),
+				SubmissionMetadata {
+					claimed_score: score,
+					deposit: 6,
+					fee: 3,
+					pages: bounded_vec![true, false, false],
+					reward: 3
+				}
+			);
+		});
+	}
+
+	#[test]
+	fn metadata_submission_sorted_based_on_stake() {
+		// the leaderboard stays sorted; when full, only a stronger submission may enter, and
+		// it ejects (discards + refunds) the weakest.
+		ExtBuilder::signed().build_and_execute(|| {
+			roll_to_signed_open();
+			assert_full_snapshot();
+
+			let score_from = |x| ElectionScore { minimal_stake: x, ..Default::default() };
+			let assert_held = |x| assert_eq!(balances(x), (95, 5));
+			let assert_unheld = |x| assert_eq!(balances(x), (100, 0));
+
+			assert_ok!(SignedPallet::register(RuntimeOrigin::signed(91), score_from(100)));
+			assert_eq!(*Submissions::<Runtime>::leaderboard(0), vec![(91, score_from(100))]);
+			assert_held(91);
+			assert!(
+				matches!(signed_events().as_slice(), &[SignedEvent::Registered(_, x, _)] if x == 91)
+			);
+
+			// weaker one comes while we have space.
+			assert_ok!(SignedPallet::register(RuntimeOrigin::signed(92), score_from(90)));
+			assert_eq!(
+				*Submissions::<Runtime>::leaderboard(0),
+				vec![(92, score_from(90)), (91, score_from(100))]
+			);
+			assert_held(92);
+			assert!(matches!(signed_events().as_slice(), &[
+					SignedEvent::Registered(..),
+					SignedEvent::Registered(_, x, _),
+				] if x == 92));
+
+			// stronger one comes while we have space.
+			assert_ok!(SignedPallet::register(RuntimeOrigin::signed(93), score_from(110)));
+			assert_eq!(
+				*Submissions::<Runtime>::leaderboard(0),
+				vec![(92, score_from(90)), (91, score_from(100)), (93, score_from(110))]
+			);
+			assert_held(93);
+			assert!(matches!(signed_events().as_slice(), &[
+					SignedEvent::Registered(..),
+					SignedEvent::Registered(..),
+					SignedEvent::Registered(_, x, _),
+				] if x == 93));
+
+			// weaker one comes while we don't have space.
+			assert_noop!(
+				SignedPallet::register(RuntimeOrigin::signed(94), score_from(80)),
+				Error::<T>::QueueFull
+			);
+			assert_eq!(
+				*Submissions::<Runtime>::leaderboard(0),
+				vec![(92, score_from(90)), (91, score_from(100)), (93, score_from(110))]
+			);
+			assert_unheld(94);
+			// no event has been emitted this time.
+			assert!(matches!(
+				signed_events().as_slice(),
+				&[
+					SignedEvent::Registered(..),
+					SignedEvent::Registered(..),
+					SignedEvent::Registered(..),
+				]
+			));
+
+			// stronger one comes while we don't have space. Eject the weakest
+			assert_ok!(SignedPallet::register(RuntimeOrigin::signed(94), score_from(120)));
+			assert_eq!(
+				*Submissions::<Runtime>::leaderboard(0),
+				vec![(91, score_from(100)), (93, score_from(110)), (94, score_from(120))]
+			);
+			assert!(matches!(
+				signed_events().as_slice(),
+				&[
+					SignedEvent::Registered(..),
+					SignedEvent::Registered(..),
+					SignedEvent::Registered(..),
+					SignedEvent::Discarded(_, 92),
+					SignedEvent::Registered(_, 94, _),
+				]
+			));
+			assert_held(94);
+			assert_unheld(92);
+
+			// another stronger one comes, only replace the weakest.
+			assert_ok!(SignedPallet::register(RuntimeOrigin::signed(95), score_from(105)));
+			assert_eq!(
+				*Submissions::<Runtime>::leaderboard(0),
+				vec![(95, score_from(105)), (93, score_from(110)), (94, score_from(120))]
+			);
+			assert_held(95);
+			assert_unheld(91);
+			assert!(matches!(
+				signed_events().as_slice(),
+				&[
+					SignedEvent::Registered(..),
+					SignedEvent::Registered(..),
+					SignedEvent::Registered(..),
+					SignedEvent::Discarded(..),
+					SignedEvent::Registered(..),
+					SignedEvent::Discarded(_, 91),
+					SignedEvent::Registered(_, 95, _),
+				]
+			));
+		})
+	}
+
+	#[test]
+	fn can_bail_at_a_cost() {
+		// bailing removes all submission data; only the grace ratio of the deposit is refunded.
+		ExtBuilder::signed().build_and_execute(|| {
+			roll_to_signed_open();
+			assert_full_snapshot();
+
+			let score = ElectionScore { minimal_stake: 100, ..Default::default() };
+			assert_ok!(SignedPallet::register(RuntimeOrigin::signed(99), score));
+			assert_eq!(balances(99), (95, 5));
+
+			// not submitted, cannot bailout.
+			assert_noop!(SignedPallet::bail(RuntimeOrigin::signed(999)), Error::<T>::NoSubmission);
+
+			// can bail.
+			assert_ok!(SignedPallet::bail(RuntimeOrigin::signed(99)));
+			// 20% of the deposit returned, which is 1, 4 is slashed.
+			assert_eq!(balances(99), (96, 0));
+			assert_no_data_for(0, 99);
+
+			assert_eq!(
+				signed_events(),
+				vec![Event::Registered(0, 99, score), Event::Bailed(0, 99)]
+			);
+		});
+	}
+
+	#[test]
+	fn can_submit_pages() {
+		// pages can be stored, replaced, and erased after registration; each stored page holds
+		// an additional per-page deposit.
+		ExtBuilder::signed().build_and_execute(|| {
+			roll_to_signed_open();
+			assert_full_snapshot();
+
+			assert_noop!(
+				SignedPallet::submit_page(RuntimeOrigin::signed(99), 0, Default::default()),
+				Error::<T>::NotRegistered
+			);
+
+			assert_ok!(SignedPallet::register(
+				RuntimeOrigin::signed(99),
+				ElectionScore { minimal_stake: 100, ..Default::default() }
+			));
+
+			assert_eq!(Submissions::<Runtime>::pages_of(0, 99).count(), 0);
+			assert_eq!(balances(99), (95, 5));
+
+			// indices 0, 1, 2 are valid.
+			assert_noop!(
+				SignedPallet::submit_page(RuntimeOrigin::signed(99), 3, Default::default()),
+				Error::<T>::BadPageIndex
+			);
+
+			// add the first page.
+			assert_ok!(SignedPallet::submit_page(
+				RuntimeOrigin::signed(99),
+				0,
+				Some(Default::default())
+			));
+			assert_eq!(Submissions::<Runtime>::pages_of(0, 99).count(), 1);
+			assert_eq!(balances(99), (94, 6));
+			assert_eq!(
+				Submissions::<Runtime>::metadata_of(0, 99).unwrap().pages.into_inner(),
+				vec![true, false, false]
+			);
+
+			// replace it again, nada.
+			assert_ok!(SignedPallet::submit_page(
+				RuntimeOrigin::signed(99),
+				0,
+				Some(Default::default())
+			));
+			assert_eq!(Submissions::<Runtime>::pages_of(0, 99).count(), 1);
+			assert_eq!(balances(99), (94, 6));
+
+			// add a new one.
+			assert_ok!(SignedPallet::submit_page(
+				RuntimeOrigin::signed(99),
+				1,
+				Some(Default::default())
+			));
+			assert_eq!(Submissions::<Runtime>::pages_of(0, 99).count(), 2);
+			assert_eq!(balances(99), (93, 7));
+			assert_eq!(
+				Submissions::<Runtime>::metadata_of(0, 99).unwrap().pages.into_inner(),
+				vec![true, true, false]
+			);
+
+			// remove one, deposit is back.
+			assert_ok!(SignedPallet::submit_page(RuntimeOrigin::signed(99), 0, None));
+			assert_eq!(Submissions::<Runtime>::pages_of(0, 99).count(), 1);
+			assert_eq!(balances(99), (94, 6));
+			assert_eq!(
+				Submissions::<Runtime>::metadata_of(0, 99).unwrap().pages.into_inner(),
+				vec![false, true, false]
+			);
+
+			assert!(matches!(
+				signed_events().as_slice(),
+				&[
+					SignedEvent::Registered(..),
+					SignedEvent::Stored(.., 0),
+					SignedEvent::Stored(.., 0),
+					SignedEvent::Stored(.., 1),
+					SignedEvent::Stored(.., 0),
+				]
+			));
+		});
+	}
+}
+
+mod e2e {
+	use super::*;
+	#[test]
+	fn good_bad_evil() {
+		// an extensive scenario: 3 solutions submitted, one rewarded, one slashed, and one
+		// discarded.
+		ExtBuilder::signed().build_and_execute(|| {
+			roll_to_signed_open();
+			assert_full_snapshot();
+
+			// an invalid, but weak solution.
+			{
+				let score =
+					ElectionScore { minimal_stake: 10, sum_stake: 10, sum_stake_squared: 100 };
+				assert_ok!(SignedPallet::register(RuntimeOrigin::signed(99), score));
+				assert_ok!(SignedPallet::submit_page(
+					RuntimeOrigin::signed(99),
+					0,
+					Some(Default::default())
+				));
+
+				assert_eq!(balances(99), (94, 6));
+			}
+
+			// a valid, strong solution.
+			let strong_score = {
+				let paged = mine_full_solution().unwrap();
+				load_signed_for_verification(999, paged.clone());
+				assert_eq!(balances(999), (92, 8));
+				paged.score
+			};
+
+			// an invalid, strong solution.
+			{
+				let mut score = strong_score;
+				score.minimal_stake *= 2;
+				assert_ok!(SignedPallet::register(RuntimeOrigin::signed(92), score));
+				assert_eq!(balances(92), (95, 5));
+				// we don't even bother to submit a page..
+			}
+
+			assert_eq!(
+				Submissions::<Runtime>::leaderboard(0)
+					.into_iter()
+					.map(|(x, _)| x)
+					.collect::<Vec<_>>(),
+				vec![99, 999, 92]
+			);
+
+			roll_to_signed_validation_open();
+
+			// 92 is slashed in 3 blocks, 999 becomes rewarded in 3 blocks, and 99 is discarded.
+			roll_next();
+			roll_next();
+			roll_next();
+
+			assert_eq!(
+				Submissions::<Runtime>::leaderboard(0)
+					.into_iter()
+					.map(|(x, _)| x)
+					.collect::<Vec<_>>(),
+				vec![99, 999]
+			);
+
+			roll_next();
+			roll_next();
+			roll_next();
+
+			assert_eq!(
+				signed_events(),
+				vec![
+					Event::Registered(
+						0,
+						99,
+						ElectionScore { minimal_stake: 10, sum_stake: 10, sum_stake_squared: 100 }
+					),
+					Event::Stored(0, 99, 0),
+					Event::Registered(
+						0,
+						999,
+						ElectionScore {
+							minimal_stake: 55,
+							sum_stake: 130,
+							sum_stake_squared: 8650
+						}
+					),
+					Event::Stored(0, 999, 0),
+					Event::Stored(0, 999, 1),
+					Event::Stored(0, 999, 2),
+					Event::Registered(
+						0,
+						92,
+						ElectionScore {
+							minimal_stake: 110,
+							sum_stake: 130,
+							sum_stake_squared: 8650
+						}
+					),
+					Event::Slashed(0, 92, 5),
+					Event::Rewarded(0, 999, 7),
+					Event::Discarded(0, 99)
+				]
+			);
+
+			assert_eq!(
+				verifier_events(),
+				vec![
+					crate::verifier::Event::Verified(2, 0),
+					crate::verifier::Event::Verified(1, 0),
+					crate::verifier::Event::Verified(0, 0),
+					crate::verifier::Event::VerificationFailed(0, FeasibilityError::InvalidScore),
+					crate::verifier::Event::Verified(2, 2),
+					crate::verifier::Event::Verified(1, 2),
+					crate::verifier::Event::Verified(0, 2),
+					crate::verifier::Event::Queued(
+						ElectionScore {
+							minimal_stake: 55,
+							sum_stake: 130,
+							sum_stake_squared: 8650
+						},
+						None
+					)
+				]
+			);
+
+			assert_eq!(balances(99), (100, 0));
+			assert_eq!(balances(999), (107, 0));
+			assert_eq!(balances(92), (95, 0));
+
+			// signed pallet should be in 100% clean state.
+			assert_ok!(Submissions::<Runtime>::ensure_killed(0));
+		})
+	}
+}
diff --git a/substrate/frame/election-provider-multi-block/src/types.rs b/substrate/frame/election-provider-multi-block/src/types.rs
new file mode 100644
index 00000000000..9657277a79e
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/types.rs
@@ -0,0 +1,363 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use frame_support::{
+	BoundedVec, CloneNoBound, DebugNoBound, DefaultNoBound, EqNoBound, PartialEqNoBound,
+};
+use sp_core::Get;
+use sp_std::{collections::btree_set::BTreeSet, fmt::Debug, prelude::*};
+
+use crate::unsigned::miner::MinerConfig;
+use codec::{Decode, Encode, MaxEncodedLen};
+use frame_election_provider_support::ElectionProvider;
+pub use frame_election_provider_support::{NposSolution, PageIndex};
+use scale_info::TypeInfo;
+pub use sp_npos_elections::{ElectionResult, ElectionScore};
+use sp_runtime::SaturatedConversion;
+
+/// The solution type used by this crate.
+pub type SolutionOf<T> = <T as MinerConfig>::Solution;
+/// The voter index. Derived from [`SolutionOf`].
+pub type SolutionVoterIndexOf<T> = <SolutionOf<T> as NposSolution>::VoterIndex;
+/// The target index. Derived from [`SolutionOf`].
+pub type SolutionTargetIndexOf<T> = <SolutionOf<T> as NposSolution>::TargetIndex;
+/// The accuracy of the election, when submitted from offchain. Derived from [`SolutionOf`].
+pub type SolutionAccuracyOf<T> = <SolutionOf<T> as NposSolution>::Accuracy;
+/// The error type of the [`crate::Config::Fallback`] election provider.
+pub type FallbackErrorOf<T> = <<T as crate::Config>::Fallback as ElectionProvider>::Error;
+
+/// The relative distribution of a voter's stake among the winning targets.
+pub type AssignmentOf<T> =
+	sp_npos_elections::Assignment<<T as MinerConfig>::AccountId, SolutionAccuracyOf<T>>;
+
+/// A paginated raw solution type.
+///
+/// This is the representation of a stored, unverified solution.
+///
+/// After feasibility, it is converted into `Supports`.
+#[derive(
+	TypeInfo,
+	Encode,
+	Decode,
+	DebugNoBound,
+	CloneNoBound,
+	EqNoBound,
+	PartialEqNoBound,
+	MaxEncodedLen,
+	DefaultNoBound,
+)]
+#[codec(mel_bound(T: crate::Config))]
+#[scale_info(skip_type_params(T))]
+pub struct PagedRawSolution<T: MinerConfig> {
+	/// The individual pages.
+	pub solution_pages: BoundedVec<SolutionOf<T>, <T as MinerConfig>::Pages>,
+	/// The final claimed score post feasibility and concatenation of all pages.
+	pub score: ElectionScore,
+	/// The designated round.
+	pub round: u32,
+}
+
+impl<T: MinerConfig> PagedRawSolution<T> {
+	/// Get the total number of voters, assuming that voters in each page are unique.
+	///
+	/// Uses saturating addition, so the result is capped at `usize::MAX` rather than wrapping.
+	pub fn voter_count(&self) -> usize {
+		self.solution_pages
+			.iter()
+			.map(|page| page.voter_count())
+			.fold(0usize, |acc, x| acc.saturating_add(x))
+	}
+
+	/// Get the total number of winners, assuming that there's only a single page of targets.
+	///
+	/// Targets appearing in multiple pages are only counted once, by collecting all unique
+	/// target indices into a set.
+	pub fn winner_count_single_page_target_snapshot(&self) -> usize {
+		self.solution_pages
+			.iter()
+			.flat_map(|page| page.unique_targets())
+			.collect::<BTreeSet<_>>()
+			.len()
+	}
+
+	/// Get the total number of edges.
+	///
+	/// Uses saturating addition, so the result is capped at `usize::MAX` rather than wrapping.
+	pub fn edge_count(&self) -> usize {
+		self.solution_pages
+			.iter()
+			.map(|page| page.edge_count())
+			.fold(0usize, |acc, x| acc.saturating_add(x))
+	}
+}
+
+/// A helper trait to deal with the page index of partial solutions.
+///
+/// This should only be called on the `Vec<Solution>` or similar types. If the solution is *full*,
+/// then it returns a normal iterator that is just mapping the index (usize) to `PageIndex`.
+///
+/// If the solution is partial, it shifts the indices sufficiently so that the most significant
+/// page of the solution matches with the most significant page of the snapshot onchain.
+///
+/// See the tests below for examples.
+pub trait Pagify<T> {
+	/// Pagify a reference.
+	fn pagify(&self, bound: PageIndex) -> Box<dyn Iterator<Item = (PageIndex, &T)> + '_>;
+	/// Consume `self` and pagify.
+	fn into_pagify(self, bound: PageIndex) -> Box<dyn Iterator<Item = (PageIndex, T)>>;
+}
+
+impl<T> Pagify<T> for Vec<T> {
+	/// Map each element to `(shifted_page_index, &element)`, such that the last element of
+	/// `self` is aligned with page `desired_pages - 1`.
+	fn pagify(&self, desired_pages: PageIndex) -> Box<dyn Iterator<Item = (PageIndex, &T)> + '_> {
+		// the shift needed to align a partial solution's most significant page with the
+		// snapshot's most significant page. Loop-invariant, so computed once up front rather
+		// than per element.
+		let desired_pages_usize = desired_pages as usize;
+		// TODO: this could be an error.
+		debug_assert!(self.len() <= desired_pages_usize);
+		let padding =
+			desired_pages_usize.saturating_sub(self.len()).saturated_into::<PageIndex>();
+		Box::new(
+			self.iter()
+				.enumerate()
+				.map(move |(p, s)| (p.saturated_into::<PageIndex>().saturating_add(padding), s)),
+		)
+	}
+
+	fn into_pagify(self, _: PageIndex) -> Box<dyn Iterator<Item = (PageIndex, T)>> {
+		todo!()
+	}
+}
+
+/// Helper trait to pad a partial solution such that the leftover pages are filled with zero.
+///
+/// The default (zero) pages are prepended; see the implementation for `BoundedVec` below.
+///
+/// See the tests below for examples.
+pub trait PadSolutionPages: Sized {
+	/// Pad the solution to the given number of pages.
+	fn pad_solution_pages(self, desired_pages: PageIndex) -> Self;
+}
+
+impl<T: Default + Clone + Debug, Bound: frame_support::traits::Get<u32>> PadSolutionPages
+	for BoundedVec<T, Bound>
+{
+	fn pad_solution_pages(self, desired_pages: PageIndex) -> Self {
+		// never grow beyond the vector's own bound, regardless of `desired_pages`.
+		let desired_pages_usize = (desired_pages).min(Bound::get()) as usize;
+		debug_assert!(self.len() <= desired_pages_usize);
+		if self.len() == desired_pages_usize {
+			return self
+		}
+
+		// we basically need to prepend the list with this many items.
+		let empty_slots = desired_pages_usize.saturating_sub(self.len());
+		let self_as_vec = sp_std::iter::repeat(Default::default())
+			.take(empty_slots)
+			.chain(self.into_iter())
+			.collect::<Vec<_>>();
+		self_as_vec.try_into().expect("sum of both iterators has at most `desired_pages_usize` items; `desired_pages_usize` is `min`-ed by `Bound`; conversion cannot fail; qed")
+	}
+}
+
+// NOTE on naming conventions: type aliases that end with `Of` should always be `Of<T: Config>`.
+
+/// Alias for a voter, parameterized by the miner config.
+pub(crate) type VoterOf<T> = frame_election_provider_support::Voter<
+	<T as MinerConfig>::AccountId,
+	<T as MinerConfig>::MaxVotesPerVoter,
+>;
+
+/// Alias for a page of voters, parameterized by this crate's config.
+pub(crate) type VoterPageOf<T> = BoundedVec<VoterOf<T>, <T as MinerConfig>::VoterSnapshotPerBlock>;
+
+/// Alias for all pages of voters, parameterized by this crate's config.
+pub(crate) type AllVoterPagesOf<T> = BoundedVec<VoterPageOf<T>, <T as MinerConfig>::Pages>;
+
+/// Maximum number of items that [`AllVoterPagesOf`] can contain, when flattened.
+///
+/// Computed as `VoterSnapshotPerBlock * Pages`, saturating at `u32::MAX`.
+pub(crate) struct MaxFlattenedVoters<T: MinerConfig>(sp_std::marker::PhantomData<T>);
+impl<T: MinerConfig> Get<u32> for MaxFlattenedVoters<T> {
+	fn get() -> u32 {
+		T::VoterSnapshotPerBlock::get().saturating_mul(T::Pages::get())
+	}
+}
+
+/// Same as [`AllVoterPagesOf`], but instead of being a nested bounded vec, the entire voters are
+/// flattened into one outer, unbounded `Vec` type.
+///
+/// This is bounded by [`MaxFlattenedVoters`].
+pub(crate) type AllVoterPagesFlattenedOf<T> = BoundedVec<VoterOf<T>, MaxFlattenedVoters<T>>;
+
+/// Encodes the length of a solution or a snapshot.
+///
+/// This is stored automatically on-chain, and it contains the **size of the entire snapshot**.
+/// This is also used in dispatchables as weight witness data and should **only contain the size of
+/// the presented solution**, not the entire snapshot.
+#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, Debug, Default, TypeInfo, MaxEncodedLen)]
+pub struct SolutionOrSnapshotSize {
+	/// The length of voters (compact-encoded).
+	#[codec(compact)]
+	pub voters: u32,
+	/// The length of targets (compact-encoded).
+	#[codec(compact)]
+	pub targets: u32,
+}
+
+// TODO: we are not using this anywhere.
+/// The type of `Computation` that provided this election data.
+#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, Debug, TypeInfo, MaxEncodedLen, Default)]
+pub enum ElectionCompute {
+	/// Election was computed on-chain.
+	///
+	/// This is the default variant.
+	#[default]
+	OnChain,
+	/// Election was computed with a signed submission.
+	Signed,
+	/// Election was computed with an unsigned submission.
+	Unsigned,
+	/// Election was computed with emergency status.
+	Emergency,
+}
+
+/// Current phase of the pallet.
+#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, MaxEncodedLen, Debug, TypeInfo)]
+pub enum Phase<Bn> {
+	/// Nothing is happening, and nothing will happen.
+	Halted,
+	/// Nothing is happening, but it might.
+	Off,
+	/// Signed phase is open.
+	Signed,
+	/// We are validating results.
+	///
+	/// The inner value is the block number at which this phase started. This helps with
+	/// synchronizing different sub-systems.
+	///
+	/// This always follows the signed phase, and is a window of time in which we try to validate
+	/// our signed results.
+	SignedValidation(Bn),
+	/// Unsigned phase. First element is whether it is active or not, second the starting block
+	/// number.
+	///
+	/// We do not yet check whether the unsigned phase is active or passive. The intent is for the
+	/// blockchain to be able to declare: "I believe that there exists an adequate signed
+	/// solution," advising validators not to bother running the unsigned offchain worker.
+	///
+	/// As validator nodes are free to edit their OCW code, they could simply ignore this advisory
+	/// and always compute their own solution. However, by default, when the unsigned phase is
+	/// passive, the offchain workers will not bother running.
+	Unsigned(Bn),
+	/// Snapshot is being created. No other operation is allowed. This can be one or more blocks.
+	/// The inner value should be read as "`remaining` number of pages are left to be fetched".
+	/// Thus, if the inner value is `0`, the snapshot is complete and we are ready to move on.
+	///
+	/// This value should be interpreted after `on_initialize` of this pallet has already been
+	/// called.
+	Snapshot(PageIndex),
+	/// Exporting has begun, and the given page was the last one received.
+	///
+	/// Once this is active, no more signed or unsigned solutions will be accepted.
+	Export(PageIndex),
+	/// The emergency phase. This is enabled upon a failing call to `T::ElectionProvider::elect`.
+	/// After that, the only way to leave this phase is through a successful
+	/// `T::ElectionProvider::elect`.
+	Emergency,
+}
+
+impl<Bn> Default for Phase<Bn> {
+	/// The default phase is [`Phase::Off`]: nothing scheduled, but something might be.
+	fn default() -> Self {
+		Self::Off
+	}
+}
+
+impl<Bn: PartialEq + Eq> Phase<Bn> {
+	/// `true` iff we are in [`Phase::Emergency`].
+	pub fn is_emergency(&self) -> bool {
+		matches!(self, Self::Emergency)
+	}
+
+	/// `true` iff we are in [`Phase::Signed`].
+	pub fn is_signed(&self) -> bool {
+		matches!(self, Self::Signed)
+	}
+
+	/// `true` iff we are in [`Phase::Unsigned`], regardless of its starting block.
+	pub fn is_unsigned(&self) -> bool {
+		matches!(self, Self::Unsigned(_))
+	}
+
+	/// `true` iff we are in [`Phase::Unsigned`] and it started exactly at block `at`.
+	pub fn is_unsigned_open_at(&self, at: Bn) -> bool {
+		matches!(self, Self::Unsigned(started) if *started == at)
+	}
+
+	/// `true` iff we are in [`Phase::Off`].
+	pub fn is_off(&self) -> bool {
+		matches!(self, Self::Off)
+	}
+
+	/// `true` iff we are in [`Phase::Export`], regardless of the last received page.
+	pub fn is_export(&self) -> bool {
+		matches!(self, Self::Export(_))
+	}
+
+	/// `true` iff we are in [`Phase::Halted`].
+	pub fn is_halted(&self) -> bool {
+		matches!(self, Self::Halted)
+	}
+
+	/// `true` iff we are in [`Phase::SignedValidation`], regardless of its starting block.
+	pub fn is_signed_validation(&self) -> bool {
+		matches!(self, Self::SignedValidation(_))
+	}
+
+	/// `true` iff we are in [`Phase::SignedValidation`] and it started exactly at block `at`.
+	pub fn is_signed_validation_open_at(&self, at: Bn) -> bool {
+		matches!(self, Self::SignedValidation(started) if *started == at)
+	}
+}
+
+#[cfg(test)]
+mod pagify {
+	use super::{PadSolutionPages, Pagify};
+	use frame_support::{traits::ConstU32, BoundedVec};
+	use sp_core::bounded_vec;
+
+	#[test]
+	fn pagify_works() {
+		// is a noop when you have the same length
+		assert_eq!(
+			vec![10, 11, 12].pagify(3).collect::<Vec<_>>(),
+			vec![(0, &10), (1, &11), (2, &12)]
+		);
+
+		// pads the values otherwise: a shorter input is mapped onto the trailing page indices,
+		// so the leading (missing) pages are skipped.
+		assert_eq!(vec![10, 11].pagify(3).collect::<Vec<_>>(), vec![(1, &10), (2, &11)]);
+		assert_eq!(vec![10].pagify(3).collect::<Vec<_>>(), vec![(2, &10)]);
+	}
+
+	#[test]
+	fn pad_solution_pages_works() {
+		// noop if the solution is complete, as with pagify.
+		let solution: BoundedVec<_, ConstU32<3>> = bounded_vec![1u32, 2, 3];
+		assert_eq!(solution.pad_solution_pages(3).into_inner(), vec![1, 2, 3]);
+
+		// pads the solution with default if partial..
+		let solution: BoundedVec<_, ConstU32<3>> = bounded_vec![2, 3];
+		assert_eq!(solution.pad_solution_pages(3).into_inner(), vec![0, 2, 3]);
+
+		// behaves the same as `pad_solution_pages(3)`: the requested page count is capped by the
+		// vector's bound (3 here).
+		let solution: BoundedVec<_, ConstU32<3>> = bounded_vec![2, 3];
+		assert_eq!(solution.pad_solution_pages(4).into_inner(), vec![0, 2, 3]);
+	}
+}
diff --git a/substrate/frame/election-provider-multi-block/src/unsigned/benchmarking.rs b/substrate/frame/election-provider-multi-block/src/unsigned/benchmarking.rs
new file mode 100644
index 00000000000..76efe9d9492
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/unsigned/benchmarking.rs
@@ -0,0 +1,79 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::{
+	unsigned::{miner::OffchainWorkerMiner, Call, Config, Pallet},
+	verifier::Verifier,
+	CurrentPhase, Phase,
+};
+use frame_benchmarking::v2::*;
+use frame_election_provider_support::ElectionDataProvider;
+use frame_support::{assert_ok, pallet_prelude::*};
+use frame_system::RawOrigin;
+use sp_std::boxed::Box;
+#[benchmarks(where T: crate::Config + crate::signed::Config + crate::verifier::Config)]
+mod benchmarks {
+	use super::*;
+
+	#[benchmark]
+	fn validate_unsigned() -> Result<(), BenchmarkError> {
+		// TODO: for now we are not using this, maybe remove?
+		// roll to unsigned phase open
+		T::DataProvider::set_next_election(crate::Pallet::<T>::reasonable_next_election());
+		crate::Pallet::<T>::roll_until_matches(|| {
+			matches!(CurrentPhase::<T>::get(), Phase::Unsigned(_))
+		});
+		// mine a real solution and wrap it in the call that `validate_unsigned` will inspect.
+		let call: Call<T> = OffchainWorkerMiner::<T>::mine_solution(1, false)
+			.map(|solution| Call::submit_unsigned { paged_solution: Box::new(solution) })
+			.unwrap();
+
+		#[block]
+		{
+			assert_ok!(Pallet::<T>::validate_unsigned(TransactionSource::Local, &call));
+		}
+
+		Ok(())
+	}
+
+	#[benchmark]
+	fn submit_unsigned() -> Result<(), BenchmarkError> {
+		// roll to unsigned phase open
+		T::DataProvider::set_next_election(crate::Pallet::<T>::reasonable_next_election());
+		crate::Pallet::<T>::roll_until_matches(|| {
+			matches!(CurrentPhase::<T>::get(), Phase::Unsigned(_))
+		});
+		// TODO: we need to better ensure that this is actually worst case
+		let solution = OffchainWorkerMiner::<T>::mine_solution(1, false).unwrap();
+
+		// nothing is queued
+		assert!(T::Verifier::queued_score().is_none());
+		#[block]
+		{
+			assert_ok!(Pallet::<T>::submit_unsigned(RawOrigin::None.into(), Box::new(solution)));
+		}
+
+		// something is queued, i.e. the submission was accepted by the verifier.
+		assert!(T::Verifier::queued_score().is_some());
+		Ok(())
+	}
+
+	impl_benchmark_test_suite!(
+		Pallet,
+		crate::mock::ExtBuilder::full().build_unchecked(),
+		crate::mock::Runtime
+	);
+}
diff --git a/substrate/frame/election-provider-multi-block/src/unsigned/miner.rs b/substrate/frame/election-provider-multi-block/src/unsigned/miner.rs
new file mode 100644
index 00000000000..cccfef13983
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/unsigned/miner.rs
@@ -0,0 +1,1972 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::{Call, Config, Pallet};
+use crate::{
+	helpers,
+	types::{PadSolutionPages, *},
+	verifier::{self},
+	CommonError,
+};
+use codec::Encode;
+use frame_election_provider_support::{ExtendedBalance, NposSolver, Support, VoteWeight};
+use frame_support::{traits::Get, BoundedVec};
+use frame_system::pallet_prelude::*;
+use scale_info::TypeInfo;
+use sp_npos_elections::EvaluateSupport;
+use sp_runtime::{
+	offchain::storage::{MutateStorageError, StorageValueRef},
+	traits::{SaturatedConversion, Saturating, Zero},
+};
+use sp_std::{collections::btree_map::BTreeMap, prelude::*};
+
+/// The type of the snapshot.
+///
+/// Used to express errors.
+#[derive(Debug, Eq, PartialEq)]
+pub enum SnapshotType {
+	/// Voters at the given page missing.
+	Voters(PageIndex),
+	/// Targets missing.
+	Targets,
+	/// Metadata missing.
+	Metadata,
+	/// Desired targets missing.
+	DesiredTargets,
+}
+
+/// Convenience alias for the error type of the solver configured in [`MinerConfig::Solver`].
+pub(crate) type MinerSolverErrorOf<T> = <<T as MinerConfig>::Solver as NposSolver>::Error;
+
+/// The errors related to the [`BaseMiner`].
+#[derive(
+	frame_support::DebugNoBound, frame_support::EqNoBound, frame_support::PartialEqNoBound,
+)]
+pub enum MinerError<T: MinerConfig> {
+	/// An internal error in the NPoS elections crate.
+	NposElections(sp_npos_elections::Error),
+	/// An internal error in the generic solver.
+	Solver(MinerSolverErrorOf<T>),
+	/// Snapshot data was unavailable unexpectedly.
+	SnapshotUnAvailable(SnapshotType),
+	/// The base, common errors from the pallet.
+	Common(CommonError),
+	/// The solution generated from the miner is not feasible.
+	Feasibility(verifier::FeasibilityError),
+	/// Some page index has been invalid.
+	InvalidPage,
+	/// Too many winners were removed during trimming.
+	TooManyWinnersRemoved,
+	/// A defensive error has occurred.
+	Defensive(&'static str),
+}
+
+// Allow `?` on `sp_npos_elections` operations inside the miner.
+impl<T: MinerConfig> From<sp_npos_elections::Error> for MinerError<T> {
+	fn from(e: sp_npos_elections::Error) -> Self {
+		MinerError::NposElections(e)
+	}
+}
+
+// Allow `?` on feasibility checks inside the miner.
+impl<T: MinerConfig> From<verifier::FeasibilityError> for MinerError<T> {
+	fn from(e: verifier::FeasibilityError) -> Self {
+		MinerError::Feasibility(e)
+	}
+}
+
+// Allow `?` on common pallet errors inside the miner.
+impl<T: MinerConfig> From<CommonError> for MinerError<T> {
+	fn from(e: CommonError) -> Self {
+		MinerError::Common(e)
+	}
+}
+
+/// The errors related to the `OffchainWorkerMiner`.
+#[derive(
+	frame_support::DebugNoBound, frame_support::EqNoBound, frame_support::PartialEqNoBound,
+)]
+pub(crate) enum OffchainMinerError<T: Config> {
+	/// An error in the base miner.
+	BaseMiner(MinerError<T::MinerConfig>),
+	/// The base, common errors from the pallet.
+	Common(CommonError),
+	/// Something went wrong fetching the lock.
+	Lock(&'static str),
+	/// Submitting a transaction to the pool failed.
+	PoolSubmissionFailed,
+	/// Cannot restore a solution that was not stored.
+	NoStoredSolution,
+	/// Cached solution is not a `submit_unsigned` call.
+	SolutionCallInvalid,
+	/// Failed to store a solution.
+	FailedToStoreSolution,
+}
+
+// Allow `?` on base miner operations inside the offchain miner.
+impl<T: Config> From<MinerError<T::MinerConfig>> for OffchainMinerError<T> {
+	fn from(e: MinerError<T::MinerConfig>) -> Self {
+		OffchainMinerError::BaseMiner(e)
+	}
+}
+
+// Allow `?` on common pallet errors inside the offchain miner.
+impl<T: Config> From<CommonError> for OffchainMinerError<T> {
+	fn from(e: CommonError) -> Self {
+		OffchainMinerError::Common(e)
+	}
+}
+
+/// Configurations for the miner.
+///
+/// This is extracted from the main crate's config so that an offchain miner can readily use the
+/// [`BaseMiner`] without needing to deal with the rest of the pallet's configuration.
+pub trait MinerConfig {
+	/// The account id type.
+	type AccountId: Ord + Clone + codec::Codec + core::fmt::Debug;
+	/// The solution type that the miner is mining.
+	type Solution: codec::FullCodec
+		+ Default
+		+ PartialEq
+		+ Eq
+		+ Clone
+		+ sp_std::fmt::Debug
+		+ Ord
+		+ NposSolution
+		+ TypeInfo
+		+ codec::MaxEncodedLen;
+	/// The solver type.
+	type Solver: NposSolver<AccountId = Self::AccountId>;
+	/// The maximum length that the miner should use for a solution, per page.
+	type MaxLength: Get<u32>;
+	/// Maximum number of votes per voter.
+	///
+	/// Must be the same as configured in the [`crate::Config::DataProvider`].
+	type MaxVotesPerVoter: Get<u32>;
+	/// Maximum number of winners to select per page.
+	///
+	/// The miner should respect this, it is used for trimming, and bounded data types.
+	///
+	/// Should equal to the onchain value set in `Verifier::Config`.
+	type MaxWinnersPerPage: Get<u32>;
+	/// Maximum number of backers per winner, per page.
+	///
+	/// The miner should respect this, it is used for trimming, and bounded data types.
+	///
+	/// Should equal to the onchain value set in `Verifier::Config`.
+	type MaxBackersPerWinner: Get<u32>;
+	/// Maximum number of backers, per winner, across all pages.
+	///
+	/// The miner should respect this, it is used for trimming, and bounded data types.
+	///
+	/// Should equal to the onchain value set in `Verifier::Config`.
+	type MaxBackersPerWinnerFinal: Get<u32>;
+
+	/// Maximum number of pages that we may compute.
+	///
+	/// Must be the same as configured in the [`crate::Config`].
+	type Pages: Get<u32>;
+	/// Maximum number of voters per snapshot page.
+	///
+	/// Must be the same as configured in the [`crate::Config`].
+	type VoterSnapshotPerBlock: Get<u32>;
+	/// Maximum number of targets per snapshot page.
+	///
+	/// Must be the same as configured in the [`crate::Config`].
+	type TargetSnapshotPerBlock: Get<u32>;
+	/// The hash type of the runtime.
+	type Hash: Eq + PartialEq;
+}
+
+/// A base miner that is only capable of mining a new solution and checking it against the state of
+/// this pallet for feasibility, and trimming its length/weight.
+///
+/// This is a stateless marker type; all functionality lives in associated functions.
+pub struct BaseMiner<T: MinerConfig>(sp_std::marker::PhantomData<T>);
+
+/// Parameterized `BoundedSupports` for the miner.
+pub type SupportsOfMiner<T> = frame_election_provider_support::BoundedSupports<
+	<T as MinerConfig>::AccountId,
+	<T as MinerConfig>::MaxWinnersPerPage,
+	<T as MinerConfig>::MaxBackersPerWinner,
+>;
+
+/// Aggregator for inputs to [`BaseMiner`].
+pub struct MineInput<T: MinerConfig> {
+	/// Number of winners to pick.
+	pub desired_targets: u32,
+	/// All of the targets.
+	pub all_targets: BoundedVec<T::AccountId, T::TargetSnapshotPerBlock>,
+	/// Paginated list of voters.
+	///
+	/// Note for staking-miners: How this is calculated is rather delicate, and the order of the
+	/// nested vectors matter. See carefully how `OffchainWorkerMiner::mine_solution` is doing
+	/// this.
+	pub voter_pages: AllVoterPagesOf<T>,
+	/// Number of pages to mine.
+	///
+	/// Note for staking-miner: Always use [`MinerConfig::Pages`] unless explicitly wanted
+	/// otherwise.
+	pub pages: PageIndex,
+	/// Whether to reduce the solution. Almost always `true`.
+	pub do_reduce: bool,
+	/// The current round for which the solution is being calculated.
+	pub round: u32,
+}
+
+impl<T: MinerConfig> BaseMiner<T> {
+	/// Mine a new npos solution, with the given number of pages.
+	///
+	/// This miner is only capable of mining a solution that either uses all of the pages of the
+	/// snapshot, or the top `pages` thereof.
+	///
+	/// This always trims the solution to match a few parameters:
+	///
+	/// [`MinerConfig::MaxWinnersPerPage`], [`MinerConfig::MaxBackersPerWinner`],
+	/// [`MinerConfig::MaxBackersPerWinnerFinal`] and [`MinerConfig::MaxLength`].
+	///
+	/// The order of pages returned is aligned with the snapshot. For example, the index 0 of the
+	/// returning solution pages corresponds to the page 0 of the snapshot.
+	///
+	/// The only difference is, if the solution is partial, then [`Pagify`] must be used to properly
+	/// pad the results.
+	pub fn mine_solution(
+		MineInput { desired_targets, all_targets, voter_pages, mut pages, do_reduce, round }: MineInput<
+			T,
+		>,
+	) -> Result<PagedRawSolution<T>, MinerError<T>> {
+		// never attempt to mine more pages than the pallet supports.
+		pages = pages.min(T::Pages::get());
+
+		// we also build this closure early, so we can let `targets` be consumed.
+		let voter_page_fn = helpers::generate_voter_page_fn::<T>(&voter_pages);
+		let target_index_fn = helpers::target_index_fn::<T>(&all_targets);
+
+		// now flatten the voters, ready to be used as if pagination did not exist.
+		let all_voters: AllVoterPagesFlattenedOf<T> = voter_pages
+			.iter()
+			.cloned()
+			.flatten()
+			.collect::<Vec<_>>()
+			.try_into()
+			.expect("Flattening the voters into `AllVoterPagesFlattenedOf` cannot fail; qed");
+
+		let ElectionResult { winners: _, assignments } = T::Solver::solve(
+			desired_targets as usize,
+			all_targets.clone().to_vec(),
+			all_voters.clone().into_inner(),
+		)
+		.map_err(|e| MinerError::Solver(e))?;
+
+		// reduce and trim supports. We don't trim length and weight here, since those are dependent
+		// on the final form of the solution ([`PagedRawSolution`]), thus we do it later.
+		let trimmed_assignments = {
+			// Implementation note: the overall code path is as follows: election_results ->
+			// assignments -> staked assignments -> reduce -> supports -> trim supports -> staked
+			// assignments -> final assignments
+			// This is by no means the most performant, but it is clear and correct.
+			use sp_npos_elections::{
+				assignment_ratio_to_staked_normalized, assignment_staked_to_ratio_normalized,
+				reduce, supports_to_staked_assignment, to_supports, EvaluateSupport,
+			};
+
+			// These closures are of no use in the rest of this code, since they only deal with the
+			// overall list of voters.
+			let cache = helpers::generate_voter_cache::<T, _>(&all_voters);
+			let stake_of = helpers::stake_of_fn::<T, _>(&all_voters, &cache);
+
+			// 1. convert to staked and reduce
+			let (reduced_count, staked) = {
+				let mut staked = assignment_ratio_to_staked_normalized(assignments, &stake_of)
+					.map_err::<MinerError<T>, _>(Into::into)?;
+
+				// first, reduce the solution if requested. This will already remove a lot of
+				// "redundant" edges and reduce the chance for the need of any further trimming.
+				let count = if do_reduce { reduce(&mut staked) } else { 0 };
+				(count, staked)
+			};
+
+			// 2. trim the supports by backing.
+			let (_pre_score, final_trimmed_assignments, winners_removed, backers_removed) = {
+				// these supports could very well be invalid for SCORE purposes. The reason is that
+				// you might trim out half of an account's stake, but we don't look for this
+				// account's other votes to fix it.
+				let supports_invalid_score = to_supports(&staked);
+
+				let pre_score = (&supports_invalid_score).evaluate();
+				let (bounded_invalid_score, winners_removed, backers_removed) =
+					SupportsOfMiner::<T>::sorted_truncate_from(supports_invalid_score);
+
+				// now recreate the staked assignments
+				let staked = supports_to_staked_assignment(bounded_invalid_score.into());
+				let assignments = assignment_staked_to_ratio_normalized(staked)
+					.map_err::<MinerError<T>, _>(Into::into)?;
+				(pre_score, assignments, winners_removed, backers_removed)
+			};
+
+			miner_log!(
+				debug,
+				"initial score = {:?}, reduced {} edges, trimmed {} winners from supports, trimmed {} backers from support",
+				_pre_score,
+				reduced_count,
+				winners_removed,
+				backers_removed,
+			);
+
+			final_trimmed_assignments
+		};
+
+		// split the assignments into different pages.
+		let mut paged_assignments: BoundedVec<Vec<AssignmentOf<T>>, T::Pages> =
+			BoundedVec::with_bounded_capacity(pages as usize);
+		paged_assignments.bounded_resize(pages as usize, Default::default());
+		for assignment in trimmed_assignments {
+			// NOTE: this `page` index is LOCAL. It does not correspond to the actual page index of
+			// the snapshot map, but rather the index in the `voter_pages`.
+			let page = voter_page_fn(&assignment.who).ok_or(MinerError::InvalidPage)?;
+			let assignment_page =
+				paged_assignments.get_mut(page as usize).ok_or(MinerError::InvalidPage)?;
+			assignment_page.push(assignment);
+		}
+
+		// convert each page to a compact struct
+		let solution_pages: BoundedVec<SolutionOf<T>, T::Pages> = paged_assignments
+			.into_iter()
+			.enumerate()
+			.map(|(page_index, assignment_page)| {
+				// get the page of the snapshot that corresponds to this page of the assignments.
+				let page: PageIndex = page_index.saturated_into();
+				let voter_snapshot_page = voter_pages
+					.get(page as usize)
+					.ok_or(MinerError::SnapshotUnAvailable(SnapshotType::Voters(page)))?;
+
+				let voter_index_fn = {
+					let cache = helpers::generate_voter_cache::<T, _>(&voter_snapshot_page);
+					helpers::voter_index_fn_owned::<T>(cache)
+				};
+				<SolutionOf<T>>::from_assignment(
+					&assignment_page,
+					&voter_index_fn,
+					&target_index_fn,
+				)
+				.map_err::<MinerError<T>, _>(Into::into)
+			})
+			.collect::<Result<Vec<_>, _>>()?
+			.try_into()
+			.expect("`paged_assignments` is bound by `T::Pages`; length cannot change in iter chain; qed");
+
+		// now do the weight and length trim.
+		let mut solution_pages_unbounded = solution_pages.into_inner();
+		let _trim_length_weight =
+			Self::maybe_trim_weight_and_len(&mut solution_pages_unbounded, &voter_pages)?;
+		let solution_pages = solution_pages_unbounded
+			.try_into()
+			.expect("maybe_trim_weight_and_len cannot increase the length of its input; qed.");
+		miner_log!(debug, "trimmed {} voters due to length restriction.", _trim_length_weight);
+
+		// finally, wrap everything up. Assign a fake score here, since we might need to re-compute
+		// it.
+		let mut paged = PagedRawSolution { round, solution_pages, score: Default::default() };
+
+		// OPTIMIZATION: we do feasibility_check inside `compute_score`, and once later
+		// pre_dispatch. I think it is fine, but maybe we can improve it.
+		let score = Self::compute_score(&paged, &voter_pages, &all_targets, desired_targets)
+			.map_err::<MinerError<T>, _>(Into::into)?;
+		paged.score = score;
+
+		miner_log!(
+			info,
+			"mined a solution with {} pages, score {:?}, {} winners, {} voters, {} edges, and {} bytes",
+			pages,
+			score,
+			paged.winner_count_single_page_target_snapshot(),
+			paged.voter_count(),
+			paged.edge_count(),
+			paged.using_encoded(|b| b.len())
+		);
+
+		Ok(paged)
+	}
+
+	/// Perform the feasibility check on all pages of a solution, returning the verified supports
+	/// of each page if all good, and the corresponding error otherwise.
+	pub fn check_feasibility(
+		paged_solution: &PagedRawSolution<T>,
+		paged_voters: &AllVoterPagesOf<T>,
+		snapshot_targets: &BoundedVec<T::AccountId, T::TargetSnapshotPerBlock>,
+		desired_targets: u32,
+		solution_type: &str,
+	) -> Result<Vec<SupportsOfMiner<T>>, MinerError<T>> {
+		// Pad the voter pages so that their indices line up with the (possibly partial) solution
+		// pages, then check every solution page for feasibility.
+		let padded_voters = paged_voters.clone().pad_solution_pages(T::Pages::get());
+		paged_solution
+			.solution_pages
+			.pagify(T::Pages::get())
+			.map(|(page_index, page_solution)| {
+				verifier::feasibility_check_page_inner_with_snapshot::<T>(
+					page_solution.clone(),
+					&padded_voters[page_index as usize],
+					snapshot_targets,
+					desired_targets,
+				)
+			})
+			// TODO: Check `MaxBackersPerWinnerFinal` over the accumulated supports.
+			.collect::<Result<Vec<_>, _>>()
+			.map_err(|err| {
+				miner_log!(
+					warn,
+					"feasibility check failed for {} solution at: {:?}",
+					solution_type,
+					err
+				);
+				MinerError::from(err)
+			})
+	}
+
+	/// Take the given raw paged solution and compute its score. This will replicate what the chain
+	/// would do as closely as possible, and expects all the corresponding snapshot data to be
+	/// available.
+	fn compute_score(
+		paged_solution: &PagedRawSolution<T>,
+		paged_voters: &AllVoterPagesOf<T>,
+		all_targets: &BoundedVec<T::AccountId, T::TargetSnapshotPerBlock>,
+		desired_targets: u32,
+	) -> Result<ElectionScore, MinerError<T>> {
+		let all_supports = Self::check_feasibility(
+			paged_solution,
+			paged_voters,
+			all_targets,
+			desired_targets,
+			"mined",
+		)?;
+		// sum up the backing of each winner across all pages.
+		let mut total_backings: BTreeMap<T::AccountId, ExtendedBalance> = BTreeMap::new();
+		all_supports.into_iter().flat_map(|x| x.0).for_each(|(who, support)| {
+			let backing = total_backings.entry(who).or_default();
+			*backing = backing.saturating_add(support.total);
+		});
+
+		// rebuild supports with only the accumulated totals; the per-voter lists are left empty
+		// here since only the totals feed into `evaluate`.
+		let all_supports = total_backings
+			.into_iter()
+			.map(|(who, total)| (who, Support { total, ..Default::default() }))
+			.collect::<Vec<_>>();
+
+		Ok((&all_supports).evaluate())
+	}
+
+	/// Trim the given supports so that none of them has more backers than
+	/// [`MinerConfig::MaxBackersPerWinner`].
+	///
+	/// Note that this should only be called on the *global, non-paginated* supports. Calling this
+	/// on a single page of supports is essentially pointless and does not guarantee anything in
+	/// particular.
+	///
+	/// Returns the count of supports trimmed.
+	pub fn trim_supports(supports: &mut sp_npos_elections::Supports<T::AccountId>) -> u32 {
+		let limit = T::MaxBackersPerWinner::get() as usize;
+		let mut trimmed = 0u32;
+		for (_, support) in supports.iter_mut() {
+			if support.voters.len() <= limit {
+				continue
+			}
+			// keep only the heaviest `limit` backers, then recompute the backing total.
+			support.voters.sort_unstable_by(|(_, x), (_, y)| y.cmp(x));
+			support.voters.truncate(limit);
+			support.total =
+				support.voters.iter().fold(0, |acc, (_, x)| acc.saturating_add(*x));
+			trimmed.saturating_inc();
+		}
+		trimmed
+	}
+
+	/// Maybe trim the weight and length of the given multi-page solution.
+	///
+	/// Returns the number of voters removed.
+	///
+	/// If either of the bounds are not met, the trimming strategy is as follows:
+	///
+	/// Start from the least significant page. Assume only this page is going to be trimmed. call
+	/// `page.sort()` on this page. This will make sure in each field (`votes1`, `votes2`, etc.) of
+	/// that page, the voters are sorted by descending stake. Then, we compare the last item of each
+	/// field. This is the process of removing the single least staked voter.
+	///
+	/// We repeat this until satisfied, for both weight and length. If a full page is removed, but
+	/// the bound is not satisfied, we need to make sure that we sort the next least valuable page,
+	/// and repeat the same process.
+	///
+	/// NOTE: this is a public function to be used by the `OffchainWorkerMiner` or any similar one,
+	/// based on the submission strategy. The length and weight bounds of a call are dependent on
+	/// the number of pages being submitted, the number of blocks over which we submit, and the type
+	/// of the transaction and its weight (e.g. signed or unsigned).
+	///
+	/// NOTE: It could be that this function removes too many voters, and the solution becomes
+	/// invalid. This is not yet handled and only a warning is emitted.
+	pub fn maybe_trim_weight_and_len(
+		solution_pages: &mut Vec<SolutionOf<T>>,
+		paged_voters: &AllVoterPagesOf<T>,
+	) -> Result<u32, MinerError<T>> {
+		debug_assert_eq!(solution_pages.len(), paged_voters.len());
+		let size_limit = T::MaxLength::get();
+
+		let needs_any_trim = |solution_pages: &mut Vec<SolutionOf<T>>| {
+			let size = solution_pages.encoded_size() as u32;
+			let needs_len_trim = size > size_limit;
+			// a reminder that we used to have weight trimming here, but no more!
+			let needs_weight_trim = false;
+			needs_weight_trim || needs_len_trim
+		};
+
+		// Note the solution might be partial. In either case, this is its least significant page.
+		let mut current_trimming_page = 0;
+		// maps a voter index within the given page to its stake, defaulting to zero when the index
+		// is out of bounds.
+		let current_trimming_page_stake_of = |current_trimming_page: usize| {
+			Box::new(move |voter_index: &SolutionVoterIndexOf<T>| -> VoteWeight {
+				paged_voters
+					.get(current_trimming_page)
+					.and_then(|page_voters| {
+						page_voters
+							.get((*voter_index).saturated_into::<usize>())
+							.map(|(_, s, _)| *s)
+					})
+					.unwrap_or_default()
+			})
+		};
+
+		let sort_current_trimming_page =
+			|current_trimming_page: usize, solution_pages: &mut Vec<SolutionOf<T>>| {
+				solution_pages.get_mut(current_trimming_page).map(|solution_page| {
+					let stake_of_fn = current_trimming_page_stake_of(current_trimming_page);
+					solution_page.sort(stake_of_fn)
+				});
+			};
+
+		let is_empty = |solution_pages: &Vec<SolutionOf<T>>| {
+			solution_pages.iter().all(|page| page.voter_count().is_zero())
+		};
+
+		if needs_any_trim(solution_pages) {
+			sort_current_trimming_page(current_trimming_page, solution_pages)
+		}
+
+		// Implementation note: we want `solution_pages` and `paged_voters` to remain in sync, so
+		// while one of the pages of `solution_pages` might become "empty" we prefer not removing
+		// it. This has a slight downside that even an empty pages consumes a few dozens of bytes,
+		// which we accept for code simplicity.
+
+		let mut removed = 0;
+		while needs_any_trim(solution_pages) && !is_empty(solution_pages) {
+			if let Some(removed_idx) =
+				solution_pages.get_mut(current_trimming_page).and_then(|page| {
+					let stake_of_fn = current_trimming_page_stake_of(current_trimming_page);
+					page.remove_weakest_sorted(&stake_of_fn)
+				}) {
+				miner_log!(
+					trace,
+					"removed voter at index {:?} of (un-pagified) page {} as the weakest due to weight/length limits.",
+					removed_idx,
+					current_trimming_page
+				);
+				// we removed one person, continue.
+				removed.saturating_inc();
+			} else {
+				// no more voters can be removed from this page. Try and go to the next page.
+				miner_log!(
+					debug,
+					"page {} seems to be fully empty now, moving to the next one",
+					current_trimming_page
+				);
+				let next_page = current_trimming_page.saturating_add(1);
+				if paged_voters.len() > next_page {
+					current_trimming_page = next_page;
+					sort_current_trimming_page(current_trimming_page, solution_pages);
+				} else {
+					miner_log!(
+						warn,
+						"no more pages to trim from at page {}, already trimmed",
+						current_trimming_page
+					);
+					break
+				}
+			}
+		}
+
+		Ok(removed)
+	}
+}
+
+/// A miner that is suited to work inside offchain worker environment.
+///
+/// This is parameterized by [`Config`], rather than [`MinerConfig`].
+///
+/// A stateless marker type: all functionality lives in associated functions.
+pub(crate) struct OffchainWorkerMiner<T: Config>(sp_std::marker::PhantomData<T>);
+
+impl<T: Config> OffchainWorkerMiner<T> {
+	/// Storage key used to store the offchain worker running status.
+	pub(crate) const OFFCHAIN_LOCK: &'static [u8] = b"parity/multi-block-unsigned-election/lock";
+	/// Storage key used to store the last block number at which offchain worker ran.
+	const OFFCHAIN_LAST_BLOCK: &'static [u8] = b"parity/multi-block-unsigned-election";
+	/// Storage key used to cache the solution `call` and its snapshot fingerprint.
+	const OFFCHAIN_CACHED_CALL: &'static [u8] = b"parity/multi-block-unsigned-election/call";
+	/// The number of pages that the offchain worker miner will try and mine.
+	const MINING_PAGES: PageIndex = 1;
+
+	/// Fetch the `pages` most significant voter snapshot pages, alongside the full target
+	/// snapshot and `desired_targets`, from this pallet's on-chain snapshot storage.
+	///
+	/// Errors with [`MinerError::SnapshotUnAvailable`] if any required piece is missing.
+	pub(crate) fn fetch_snapshot(
+		pages: PageIndex,
+	) -> Result<
+		(AllVoterPagesOf<T::MinerConfig>, BoundedVec<T::AccountId, T::TargetSnapshotPerBlock>, u32),
+		OffchainMinerError<T>,
+	> {
+		// read the appropriate snapshot pages.
+		let desired_targets = crate::Snapshot::<T>::desired_targets()
+			.ok_or(MinerError::SnapshotUnAvailable(SnapshotType::DesiredTargets))?;
+		let all_targets = crate::Snapshot::<T>::targets()
+			.ok_or(MinerError::SnapshotUnAvailable(SnapshotType::Targets))?;
+
+		// This is the range of voters that we are interested in. Mind the second `.rev`, it is
+		// super critical: it keeps the selected (most significant) pages in ascending order.
+		let voter_pages_range = (crate::Pallet::<T>::lsp()..crate::Pallet::<T>::msp() + 1)
+			.rev()
+			.take(pages as usize)
+			.rev();
+
+		sublog!(
+			debug,
+			"unsigned::base-miner",
+			"mining a solution with {} pages, voter snapshot range will be: {:?}",
+			pages,
+			voter_pages_range.clone().collect::<Vec<_>>()
+		);
+
+		// NOTE: if `pages (2) < T::Pages (3)`, at this point this vector will have length 2,
+		// with a layout of `[snapshot(1), snapshot(2)]`, namely the two most significant pages
+		//  of the snapshot.
+		let voter_pages: BoundedVec<_, T::Pages> = voter_pages_range
+			.map(|p| {
+				crate::Snapshot::<T>::voters(p)
+					.ok_or(MinerError::SnapshotUnAvailable(SnapshotType::Voters(p)))
+			})
+			.collect::<Result<Vec<_>, _>>()?
+			.try_into()
+			.expect(
+				"`voter_pages_range` has `.take(pages)`; it must have length less than pages; it
+		must convert to `BoundedVec`; qed",
+			);
+
+		Ok((voter_pages, all_targets, desired_targets))
+	}
+
+	/// Mine a `pages`-page solution over the current on-chain snapshot, for the current round.
+	pub(crate) fn mine_solution(
+		pages: PageIndex,
+		do_reduce: bool,
+	) -> Result<PagedRawSolution<T::MinerConfig>, OffchainMinerError<T>> {
+		let (voter_pages, all_targets, desired_targets) = Self::fetch_snapshot(pages)?;
+		let round = crate::Pallet::<T>::round();
+		BaseMiner::<T::MinerConfig>::mine_solution(MineInput {
+			desired_targets,
+			all_targets,
+			voter_pages,
+			pages,
+			do_reduce,
+			round,
+		})
+		.map_err(Into::into)
+	}
+
+	/// Get a checked solution from the base miner, ensure unsigned-specific checks also pass, then
+	/// return a submittable call.
+	fn mine_checked_call() -> Result<Call<T>, OffchainMinerError<T>> {
+		// we always do reduce in the offchain worker miner.
+		let reduce = true;
+
+		// NOTE: we don't run any checks in the base miner, and run all of them via
+		// `Self::full_checks`.
+		let paged_solution = Self::mine_solution(Self::MINING_PAGES, reduce)
+			.map_err::<OffchainMinerError<T>, _>(Into::into)?;
+		// check the call fully, no fingerprinting.
+		let _ = Self::check_solution(&paged_solution, None, true, "mined")?;
+
+		let call: Call<T> =
+			Call::<T>::submit_unsigned { paged_solution: Box::new(paged_solution) }.into();
+
+		Ok(call)
+	}
+
+	/// Mine a new checked solution, cache it, and submit it back to the chain as an unsigned
+	/// transaction.
+	pub fn mine_check_save_submit() -> Result<(), OffchainMinerError<T>> {
+		sublog!(debug, "unsigned::ocw-miner", "miner attempting to compute an unsigned solution.");
+		let call = Self::mine_checked_call()?;
+		Self::save_solution(&call, crate::Snapshot::<T>::fingerprint())?;
+		Self::submit_call(call)
+	}
+
+	/// Check the solution, from the perspective of the offchain-worker miner:
+	///
+	/// 1. unsigned-specific checks.
+	/// 2. full-checks of the base miner
+	/// 	1. optionally feasibility check.
+	/// 	2. snapshot-independent checks.
+	/// 		1. optionally, snapshot fingerprint.
+	pub fn check_solution(
+		paged_solution: &PagedRawSolution<T::MinerConfig>,
+		maybe_snapshot_fingerprint: Option<T::Hash>,
+		do_feasibility: bool,
+		solution_type: &str,
+	) -> Result<(), OffchainMinerError<T>> {
+		// NOTE: we prefer cheap checks first, so first run unsigned checks.
+		Pallet::<T>::unsigned_specific_checks(paged_solution)?;
+		Self::base_check_solution(
+			paged_solution,
+			maybe_snapshot_fingerprint,
+			do_feasibility,
+			solution_type,
+		)
+	}
+
+	/// Submit `call` to the transaction pool as an unsigned (inherent-like) transaction.
+	fn submit_call(call: Call<T>) -> Result<(), OffchainMinerError<T>> {
+		sublog!(
+			debug,
+			"unsigned::ocw-miner",
+			"miner submitting a solution as an unsigned transaction"
+		);
+		let xt = T::create_inherent(call.into());
+		frame_system::offchain::SubmitTransaction::<T, Call<T>>::submit_transaction(xt)
+			.map(|_| {
+				sublog!(
+					debug,
+					"unsigned::ocw-miner",
+					"miner submitted a solution as an unsigned transaction",
+				);
+			})
+			.map_err(|_| OffchainMinerError::PoolSubmissionFailed)
+	}
+
+	/// Check the solution, from the perspective of the base miner:
+	///
+	/// 1. snapshot-independent checks.
+	/// 	- with the fingerprint check being an optional step of that.
+	/// 2. optionally, feasibility check.
+	///
+	/// In most cases, you should always use this either with `do_feasibility = true` or
+	/// `maybe_snapshot_fingerprint.is_some()`. Doing both could be an overkill. The snapshot
+	/// staying constant (which can be checked via the hash) is a strong guarantee that the
+	/// feasibility still holds.
+	///
+	/// The difference between this and [`Self::check_solution`] is that this does not run unsigned
+	/// specific checks.
+	pub(crate) fn base_check_solution(
+		paged_solution: &PagedRawSolution<T::MinerConfig>,
+		maybe_snapshot_fingerprint: Option<T::Hash>,
+		do_feasibility: bool,
+		solution_type: &str, // TODO: remove
+	) -> Result<(), OffchainMinerError<T>> {
+		let _ = crate::Pallet::<T>::snapshot_independent_checks(
+			paged_solution,
+			maybe_snapshot_fingerprint,
+		)?;
+
+		if do_feasibility {
+			let (voter_pages, all_targets, desired_targets) =
+				Self::fetch_snapshot(paged_solution.solution_pages.len() as PageIndex)?;
+			let _ = BaseMiner::<T::MinerConfig>::check_feasibility(
+				&paged_solution,
+				&voter_pages,
+				&all_targets,
+				desired_targets,
+				solution_type,
+			)?;
+		}
+
+		Ok(())
+	}
+
+	/// Attempt to restore a solution from cache. Otherwise, compute it fresh. Either way,
+	/// submit if our call's score is greater than that of the cached solution.
+	pub fn restore_or_compute_then_maybe_submit() -> Result<(), OffchainMinerError<T>> {
+		sublog!(
+			debug,
+			"unsigned::ocw-miner",
+			"miner attempting to restore or compute an unsigned solution."
+		);
+
+		let call = Self::restore_solution()
+			.and_then(|(call, snapshot_fingerprint)| {
+				// ensure the cached call is still current before submitting
+				if let Call::submit_unsigned { paged_solution, .. } = &call {
+					// we check the snapshot fingerprint instead of doing a full feasibility.
+					OffchainWorkerMiner::<T>::check_solution(
+						paged_solution,
+						Some(snapshot_fingerprint),
+						false,
+						"restored"
+					).map_err::<OffchainMinerError<T>, _>(Into::into)?;
+					Ok(call)
+				} else {
+					Err(OffchainMinerError::SolutionCallInvalid)
+				}
+			})
+			.or_else::<OffchainMinerError<T>, _>(|error| {
+				use OffchainMinerError as OE;
+				use MinerError as ME;
+				use CommonError as CE;
+				match error {
+					OE::NoStoredSolution => {
+						// if, and only if, no solution is stored, mine a fresh one and cache it.
+						let call = Self::mine_checked_call()?;
+						Self::save_solution(&call, crate::Snapshot::<T>::fingerprint())?;
+						Ok(call)
+					},
+					OE::Common(ref e) => {
+						sublog!(
+							error,
+							"unsigned::ocw-miner",
+							"unsigned specific checks failed ({:?}) while restoring solution. This should never happen. clearing cache.",
+							e,
+						);
+						Self::clear_offchain_solution_cache();
+						Err(error)
+					},
+					OE::BaseMiner(ME::Feasibility(_))
+						| OE::BaseMiner(ME::Common(CE::WrongRound))
+						| OE::BaseMiner(ME::Common(CE::WrongFingerprint))
+					=> {
+						// note that failing `Feasibility` can only mean that the solution was
+						// computed over a snapshot that has changed due to a fork.
+						sublog!(warn, "unsigned::ocw-miner", "wiping infeasible solution ({:?}).", error);
+						// kill the "bad" solution.
+						Self::clear_offchain_solution_cache();
+
+						// .. then return the error as-is.
+						Err(error)
+					},
+					_ => {
+						sublog!(debug, "unsigned::ocw-miner", "unhandled error in restoring offchain solution {:?}", error);
+						// nothing to do. Return the error as-is.
+						Err(error)
+					},
+				}
+			})?;
+
+		Self::submit_call(call)
+	}
+
+	/// Checks if an execution of the offchain worker is permitted at the given block number, or
+	/// not.
+	///
+	/// This makes sure that
+	/// 1. we don't run on previous blocks in case of a re-org
+	/// 2. we don't run twice within a window of length `T::OffchainRepeat`.
+	///
+	/// Returns `Ok(())` if offchain worker limit is respected, `Err(reason)` otherwise. If
+	/// `Ok()` is returned, `now` is written in storage and will be used in further calls as the
+	/// baseline.
+	pub fn ensure_offchain_repeat_frequency(
+		now: BlockNumberFor<T>,
+	) -> Result<(), OffchainMinerError<T>> {
+		let threshold = T::OffchainRepeat::get();
+		let last_block = StorageValueRef::persistent(&Self::OFFCHAIN_LAST_BLOCK);
+
+		let mutate_stat = last_block.mutate::<_, &'static str, _>(
+			|maybe_head: Result<Option<BlockNumberFor<T>>, _>| {
+				match maybe_head {
+					Ok(Some(head)) if now < head => Err("fork."),
+					Ok(Some(head)) if now >= head && now <= head + threshold =>
+						Err("recently executed."),
+					Ok(Some(head)) if now > head + threshold => {
+						// we can run again now. Write the new head.
+						Ok(now)
+					},
+					_ => {
+						// value doesn't exist. Probably this node just booted up. Write, and
+						// run
+						Ok(now)
+					},
+				}
+			},
+		);
+
+		match mutate_stat {
+			// all good
+			Ok(_) => Ok(()),
+			// failed to write.
+			Err(MutateStorageError::ConcurrentModification(_)) => Err(OffchainMinerError::Lock(
+				"failed to write to offchain db (concurrent modification).",
+			)),
+			// fork etc.
+			Err(MutateStorageError::ValueFunctionFailed(why)) => Err(OffchainMinerError::Lock(why)),
+		}
+	}
+
+	/// Save a given call into OCW storage.
+	fn save_solution(
+		call: &Call<T>,
+		snapshot_fingerprint: T::Hash,
+	) -> Result<(), OffchainMinerError<T>> {
+		sublog!(debug, "unsigned::ocw-miner", "saving a call to the offchain storage.");
+		let storage = StorageValueRef::persistent(&Self::OFFCHAIN_CACHED_CALL);
+		match storage.mutate::<_, (), _>(|_| Ok((call.clone(), snapshot_fingerprint))) {
+			Ok(_) => Ok(()),
+			Err(MutateStorageError::ConcurrentModification(_)) =>
+				Err(OffchainMinerError::FailedToStoreSolution),
+			Err(MutateStorageError::ValueFunctionFailed(_)) => {
+				// this branch should be unreachable according to the definition of
+				// `StorageValueRef::mutate`: that function should only ever `Err` if the closure we
+				// pass it returns an error. however, for safety in case the definition changes, we
+				// do not optimize the branch away or panic.
+				Err(OffchainMinerError::FailedToStoreSolution)
+			},
+		}
+	}
+
+	/// Get a saved solution from OCW storage if it exists.
+	fn restore_solution() -> Result<(Call<T>, T::Hash), OffchainMinerError<T>> {
+		StorageValueRef::persistent(&Self::OFFCHAIN_CACHED_CALL)
+			.get()
+			.ok()
+			.flatten()
+			.ok_or(OffchainMinerError::NoStoredSolution)
+	}
+
+	/// Clear a saved solution from OCW storage.
+	fn clear_offchain_solution_cache() {
+		sublog!(debug, "unsigned::ocw-miner", "clearing offchain call cache storage.");
+		let mut storage = StorageValueRef::persistent(&Self::OFFCHAIN_CACHED_CALL);
+		storage.clear();
+	}
+
+	/// Test helper: read back the cached call from OCW storage, if any.
+	// NOTE(review): `save_solution` stores a `(Call, Hash)` tuple, but this decodes only
+	// `Call<T>` — confirm that decoding a prefix of the stored value is intended here.
+	#[cfg(test)]
+	fn cached_solution() -> Option<Call<T>> {
+		StorageValueRef::persistent(&Self::OFFCHAIN_CACHED_CALL)
+			.get::<Call<T>>()
+			.unwrap()
+	}
+}
+
+// This will only focus on testing the internals of `maybe_trim_weight_and_len_works`.
+#[cfg(test)]
+mod trim_weight_length {
+	use super::*;
+	use crate::{mock::*, verifier::Verifier};
+	use frame_election_provider_support::TryFromUnboundedPagedSupports;
+	use sp_npos_elections::Support;
+
+	#[test]
+	fn trim_length() {
+		// This is just demonstration to show the normal election result with new votes, without any
+		// trimming.
+		ExtBuilder::unsigned().build_and_execute(|| {
+			// give each voter a stake equal to its account id, so each voter's weight is distinct
+			// and the trimming order is deterministic.
+			let mut current_voters = Voters::get();
+			current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who);
+			Voters::set(current_voters);
+
+			roll_to_snapshot_created();
+			ensure_voters(3, 12);
+
+			let solution = mine_full_solution().unwrap();
+
+			assert_eq!(
+				solution.solution_pages.iter().map(|page| page.voter_count()).sum::<usize>(),
+				8
+			);
+
+			assert_eq!(solution.solution_pages.encoded_size(), 105);
+
+			load_mock_signed_and_start(solution);
+			let supports = roll_to_full_verification();
+
+			// a solution is queued.
+			assert!(VerifierPallet::queued_score().is_some());
+
+			assert_eq!(
+				supports,
+				vec![
+					// if we set any limit less than 105, 30 will be the first to leave.
+					vec![
+						(30, Support { total: 30, voters: vec![(30, 30)] }),
+						(40, Support { total: 40, voters: vec![(40, 40)] })
+					],
+					vec![
+						(30, Support { total: 11, voters: vec![(7, 7), (5, 2), (6, 2)] }),
+						(40, Support { total: 7, voters: vec![(5, 3), (6, 4)] })
+					],
+					vec![(40, Support { total: 9, voters: vec![(2, 2), (3, 3), (4, 4)] })]
+				]
+				.try_from_unbounded_paged()
+				.unwrap()
+			);
+		});
+
+		// one byte below the untrimmed encoded size (105) observed above, forcing exactly one
+		// voter to be trimmed away.
+		ExtBuilder::unsigned().miner_max_length(104).build_and_execute(|| {
+			// same stake-equals-id setup as above.
+			let mut current_voters = Voters::get();
+			current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who);
+			Voters::set(current_voters);
+
+			roll_to_snapshot_created();
+			ensure_voters(3, 12);
+
+			let solution = mine_full_solution().unwrap();
+
+			assert_eq!(
+				solution.solution_pages.iter().map(|page| page.voter_count()).sum::<usize>(),
+				7
+			);
+
+			assert_eq!(solution.solution_pages.encoded_size(), 99);
+
+			load_mock_signed_and_start(solution);
+			let supports = roll_to_full_verification();
+
+			// a solution is queued.
+			assert!(VerifierPallet::queued_score().is_some());
+
+			assert_eq!(
+				supports,
+				vec![
+					// 30 is gone!
+					vec![(40, Support { total: 40, voters: vec![(40, 40)] })],
+					vec![
+						(30, Support { total: 11, voters: vec![(7, 7), (5, 2), (6, 2)] }),
+						(40, Support { total: 7, voters: vec![(5, 3), (6, 4)] })
+					],
+					vec![(40, Support { total: 9, voters: vec![(2, 2), (3, 3), (4, 4)] })]
+				]
+				.try_from_unbounded_paged()
+				.unwrap()
+			);
+		});
+	}
+}
+
+#[cfg(test)]
+mod base_miner {
+	use std::vec;
+
+	use super::*;
+	use crate::{mock::*, Snapshot};
+	use frame_election_provider_support::TryFromUnboundedPagedSupports;
+	use sp_npos_elections::Support;
+	use sp_runtime::PerU16;
+
+	#[test]
+	fn pagination_does_not_affect_score() {
+		// the same 12 voters, split over 1, 2, or 3 pages, must always yield the same score.
+		let score_1 = ExtBuilder::unsigned()
+			.pages(1)
+			.voter_per_page(12)
+			.build_unchecked()
+			.execute_with(|| {
+				roll_to_snapshot_created();
+				mine_full_solution().unwrap().score
+			});
+		let score_2 = ExtBuilder::unsigned()
+			.pages(2)
+			.voter_per_page(6)
+			.build_unchecked()
+			.execute_with(|| {
+				roll_to_snapshot_created();
+				mine_full_solution().unwrap().score
+			});
+		let score_3 = ExtBuilder::unsigned()
+			.pages(3)
+			.voter_per_page(4)
+			.build_unchecked()
+			.execute_with(|| {
+				roll_to_snapshot_created();
+				mine_full_solution().unwrap().score
+			});
+
+		assert_eq!(score_1, score_2);
+		assert_eq!(score_2, score_3);
+	}
+
+	#[test]
+	fn mine_solution_single_page_works() {
+		ExtBuilder::unsigned().pages(1).voter_per_page(8).build_and_execute(|| {
+			roll_to_snapshot_created();
+
+			ensure_voters(1, 8);
+			ensure_targets(1, 4);
+
+			assert_eq!(
+				Snapshot::<Runtime>::voters(0)
+					.unwrap()
+					.into_iter()
+					.map(|(x, _, _)| x)
+					.collect::<Vec<_>>(),
+				vec![1, 2, 3, 4, 5, 6, 7, 8]
+			);
+
+			let paged = mine_full_solution().unwrap();
+			assert_eq!(paged.solution_pages.len(), 1);
+
+			// this solution must be feasible and submittable.
+			OffchainWorkerMiner::<Runtime>::base_check_solution(&paged, None, true, "mined")
+				.unwrap();
+
+			// now do a realistic full verification
+			load_mock_signed_and_start(paged.clone());
+			let supports = roll_to_full_verification();
+
+			assert_eq!(
+				supports,
+				vec![vec![
+					(10, Support { total: 30, voters: vec![(1, 10), (8, 10), (4, 5), (5, 5)] }),
+					(
+						40,
+						Support {
+							total: 40,
+							voters: vec![(2, 10), (3, 10), (6, 10), (4, 5), (5, 5)]
+						}
+					)
+				]]
+				.try_from_unbounded_paged()
+				.unwrap()
+			);
+
+			// NOTE: this is the same as the score of any other test that contains the first 8
+			// voters, we already test for this in `pagination_does_not_affect_score`.
+			assert_eq!(
+				paged.score,
+				ElectionScore { minimal_stake: 30, sum_stake: 70, sum_stake_squared: 2500 }
+			);
+		})
+	}
+
+	#[test]
+	fn mine_solution_double_page_works() {
+		ExtBuilder::unsigned().pages(2).voter_per_page(4).build_and_execute(|| {
+			roll_to_snapshot_created();
+
+			// 2 pages of 8 voters
+			ensure_voters(2, 8);
+			// 1 page of 4 targets
+			ensure_targets(1, 4);
+
+			// voters in pages. note the reverse page index.
+			assert_eq!(
+				Snapshot::<Runtime>::voters(0)
+					.unwrap()
+					.into_iter()
+					.map(|(x, _, _)| x)
+					.collect::<Vec<_>>(),
+				vec![5, 6, 7, 8]
+			);
+			assert_eq!(
+				Snapshot::<Runtime>::voters(1)
+					.unwrap()
+					.into_iter()
+					.map(|(x, _, _)| x)
+					.collect::<Vec<_>>(),
+				vec![1, 2, 3, 4]
+			);
+			// targets in pages.
+			assert_eq!(Snapshot::<Runtime>::targets().unwrap(), vec![10, 20, 30, 40]);
+			let paged = mine_full_solution().unwrap();
+
+			assert_eq!(
+				paged.solution_pages,
+				vec![
+					TestNposSolution {
+						// voter 6 (index 1) is backing 40 (index 3).
+						// voter 8 (index 3) is backing 10 (index 0)
+						votes1: vec![(1, 3), (3, 0)],
+						// voter 5 (index 0) is backing 40 (index 3) and 10 (index 0)
+						votes2: vec![(0, [(0, PerU16::from_parts(32768))], 3)],
+						..Default::default()
+					},
+					TestNposSolution {
+						// voter 1 (index 0) is backing 10 (index 0)
+						// voter 2 (index 1) is backing 40 (index 3)
+						// voter 3 (index 2) is backing 40 (index 3)
+						votes1: vec![(0, 0), (1, 3), (2, 3)],
+						// voter 4 (index 3) is backing 40 (index 3) and 10 (index 0)
+						votes2: vec![(3, [(0, PerU16::from_parts(32768))], 3)],
+						..Default::default()
+					},
+				]
+			);
+
+			// this solution must be feasible and submittable.
+			OffchainWorkerMiner::<Runtime>::base_check_solution(&paged, None, false, "mined")
+				.unwrap();
+
+			// it must also be verified in the verifier
+			load_mock_signed_and_start(paged.clone());
+			let supports = roll_to_full_verification();
+
+			assert_eq!(
+				supports,
+				vec![
+					// page0, supports from voters 5, 6, 7, 8
+					vec![
+						(10, Support { total: 15, voters: vec![(8, 10), (5, 5)] }),
+						(40, Support { total: 15, voters: vec![(6, 10), (5, 5)] })
+					],
+					// page1 supports from voters 1, 2, 3, 4
+					vec![
+						(10, Support { total: 15, voters: vec![(1, 10), (4, 5)] }),
+						(40, Support { total: 25, voters: vec![(2, 10), (3, 10), (4, 5)] })
+					]
+				]
+				.try_from_unbounded_paged()
+				.unwrap()
+			);
+
+			assert_eq!(
+				paged.score,
+				ElectionScore { minimal_stake: 30, sum_stake: 70, sum_stake_squared: 2500 }
+			);
+		})
+	}
+
+	#[test]
+	fn mine_solution_triple_page_works() {
+		ExtBuilder::unsigned().pages(3).voter_per_page(4).build_and_execute(|| {
+			roll_to_snapshot_created();
+
+			ensure_voters(3, 12);
+			ensure_targets(1, 4);
+
+			// voters in pages. note the reverse page index.
+			assert_eq!(
+				Snapshot::<Runtime>::voters(2)
+					.unwrap()
+					.into_iter()
+					.map(|(x, _, _)| x)
+					.collect::<Vec<_>>(),
+				vec![1, 2, 3, 4]
+			);
+			assert_eq!(
+				Snapshot::<Runtime>::voters(1)
+					.unwrap()
+					.into_iter()
+					.map(|(x, _, _)| x)
+					.collect::<Vec<_>>(),
+				vec![5, 6, 7, 8]
+			);
+			assert_eq!(
+				Snapshot::<Runtime>::voters(0)
+					.unwrap()
+					.into_iter()
+					.map(|(x, _, _)| x)
+					.collect::<Vec<_>>(),
+				vec![10, 20, 30, 40]
+			);
+
+			let paged = mine_full_solution().unwrap();
+			assert_eq!(
+				paged.solution_pages,
+				vec![
+					TestNposSolution { votes1: vec![(2, 2), (3, 3)], ..Default::default() },
+					TestNposSolution {
+						votes1: vec![(2, 2)],
+						votes2: vec![
+							(0, [(2, PerU16::from_parts(32768))], 3),
+							(1, [(2, PerU16::from_parts(32768))], 3)
+						],
+						..Default::default()
+					},
+					TestNposSolution {
+						votes1: vec![(2, 3), (3, 3)],
+						votes2: vec![(1, [(2, PerU16::from_parts(32768))], 3)],
+						..Default::default()
+					},
+				]
+			);
+
+			// this solution must be feasible and submittable.
+			OffchainWorkerMiner::<Runtime>::base_check_solution(&paged, None, true, "mined")
+				.unwrap();
+			// now do a realistic full verification
+			load_mock_signed_and_start(paged.clone());
+			let supports = roll_to_full_verification();
+
+			assert_eq!(
+				supports,
+				vec![
+					// page 0: self-votes.
+					vec![
+						(30, Support { total: 30, voters: vec![(30, 30)] }),
+						(40, Support { total: 40, voters: vec![(40, 40)] })
+					],
+					// page 1: 5, 6, 7, 8
+					vec![
+						(30, Support { total: 20, voters: vec![(7, 10), (5, 5), (6, 5)] }),
+						(40, Support { total: 10, voters: vec![(5, 5), (6, 5)] })
+					],
+					// page 2: 1, 2, 3, 4
+					vec![
+						(30, Support { total: 5, voters: vec![(2, 5)] }),
+						(40, Support { total: 25, voters: vec![(3, 10), (4, 10), (2, 5)] })
+					]
+				]
+				.try_from_unbounded_paged()
+				.unwrap()
+			);
+
+			assert_eq!(
+				paged.score,
+				ElectionScore { minimal_stake: 55, sum_stake: 130, sum_stake_squared: 8650 }
+			);
+		})
+	}
+
+	#[test]
+	fn mine_solution_choses_most_significant_pages() {
+		ExtBuilder::unsigned().pages(2).voter_per_page(4).build_and_execute(|| {
+			roll_to_snapshot_created();
+
+			ensure_voters(2, 8);
+			ensure_targets(1, 4);
+
+			// these folks should be ignored safely.
+			assert_eq!(
+				Snapshot::<Runtime>::voters(0)
+					.unwrap()
+					.into_iter()
+					.map(|(x, _, _)| x)
+					.collect::<Vec<_>>(),
+				vec![5, 6, 7, 8]
+			);
+			// voters in pages 1, this is the most significant page.
+			assert_eq!(
+				Snapshot::<Runtime>::voters(1)
+					.unwrap()
+					.into_iter()
+					.map(|(x, _, _)| x)
+					.collect::<Vec<_>>(),
+				vec![1, 2, 3, 4]
+			);
+
+			// now we ask for just 1 page of solution.
+			let paged = mine_solution(1).unwrap();
+
+			assert_eq!(
+				paged.solution_pages,
+				vec![TestNposSolution {
+					// voter 1 (index 0) is backing 10 (index 0)
+					// voter 2 (index 1) is backing 40 (index 3)
+					// voter 3 (index 2) is backing 40 (index 3)
+					votes1: vec![(0, 0), (1, 3), (2, 3)],
+					// voter 4 (index 3) is backing 40 (index 3) and 10 (index 0)
+					votes2: vec![(3, [(0, PerU16::from_parts(32768))], 3)],
+					..Default::default()
+				}]
+			);
+
+			// this solution must be feasible and submittable.
+			OffchainWorkerMiner::<Runtime>::base_check_solution(&paged, None, true, "mined")
+				.unwrap();
+			// now do a realistic full verification.
+			load_mock_signed_and_start(paged.clone());
+			let supports = roll_to_full_verification();
+
+			assert_eq!(
+				supports,
+				vec![
+					// page0: non existent.
+					vec![],
+					// page1 supports from voters 1, 2, 3, 4
+					vec![
+						(10, Support { total: 15, voters: vec![(1, 10), (4, 5)] }),
+						(40, Support { total: 25, voters: vec![(2, 10), (3, 10), (4, 5)] })
+					]
+				]
+				.try_from_unbounded_paged()
+				.unwrap()
+			);
+
+			assert_eq!(
+				paged.score,
+				ElectionScore { minimal_stake: 15, sum_stake: 40, sum_stake_squared: 850 }
+			);
+		})
+	}
+
+	#[test]
+	fn mine_solution_2_out_of_3_pages() {
+		ExtBuilder::unsigned().pages(3).voter_per_page(4).build_and_execute(|| {
+			roll_to_snapshot_created();
+
+			ensure_voters(3, 12);
+			ensure_targets(1, 4);
+
+			assert_eq!(
+				Snapshot::<Runtime>::voters(0)
+					.unwrap()
+					.into_iter()
+					.map(|(x, _, _)| x)
+					.collect::<Vec<_>>(),
+				vec![10, 20, 30, 40]
+			);
+			assert_eq!(
+				Snapshot::<Runtime>::voters(1)
+					.unwrap()
+					.into_iter()
+					.map(|(x, _, _)| x)
+					.collect::<Vec<_>>(),
+				vec![5, 6, 7, 8]
+			);
+			assert_eq!(
+				Snapshot::<Runtime>::voters(2)
+					.unwrap()
+					.into_iter()
+					.map(|(x, _, _)| x)
+					.collect::<Vec<_>>(),
+				vec![1, 2, 3, 4]
+			);
+
+			// now we ask for just 2 pages of solution, out of the 3 available.
+			let paged = mine_solution(2).unwrap();
+
+			// this solution must be feasible and submittable.
+			OffchainWorkerMiner::<Runtime>::base_check_solution(&paged, None, true, "mined")
+				.unwrap();
+
+			assert_eq!(
+				paged.solution_pages,
+				vec![
+					// this can be "pagified" to snapshot at index 1, which contains 5, 6, 7, 8
+					// in which:
+					// 6 (index:1) votes for 40 (index:3)
+					// 8 (index:3) votes for 10 (index:0)
+					// 5 votes for both 10 and 40
+					TestNposSolution {
+						votes1: vec![(1, 3), (3, 0)],
+						votes2: vec![(0, [(0, PerU16::from_parts(32768))], 3)],
+						..Default::default()
+					},
+					// this can be "pagified" to snapshot at index 2, which contains 1, 2, 3, 4
+					// in which:
+					// 1 (index:0) votes for 10 (index:0)
+					// 2 (index:1) votes for 40 (index:3)
+					// 3 (index:2) votes for 40 (index:3)
+					// 4 votes for both 10 and 40
+					TestNposSolution {
+						votes1: vec![(0, 0), (1, 3), (2, 3)],
+						votes2: vec![(3, [(0, PerU16::from_parts(32768))], 3)],
+						..Default::default()
+					}
+				]
+			);
+
+			// this solution must be feasible and submittable.
+			OffchainWorkerMiner::<Runtime>::base_check_solution(&paged, None, true, "mined")
+				.unwrap();
+			// now do a realistic full verification.
+			load_mock_signed_and_start(paged.clone());
+			let supports = roll_to_full_verification();
+
+			assert_eq!(
+				supports,
+				vec![
+					// empty page 0.
+					vec![],
+					// supports from voters 5, 6, 7, 8
+					vec![
+						(10, Support { total: 15, voters: vec![(8, 10), (5, 5)] }),
+						(40, Support { total: 15, voters: vec![(6, 10), (5, 5)] })
+					],
+					// supports from voters 1, 2, 3, 4
+					vec![
+						(10, Support { total: 15, voters: vec![(1, 10), (4, 5)] }),
+						(40, Support { total: 25, voters: vec![(2, 10), (3, 10), (4, 5)] })
+					]
+				]
+				.try_from_unbounded_paged()
+				.unwrap()
+			);
+
+			assert_eq!(
+				paged.score,
+				ElectionScore { minimal_stake: 30, sum_stake: 70, sum_stake_squared: 2500 }
+			);
+		})
+	}
+
+	#[test]
+	fn can_reduce_solution() {
+		ExtBuilder::unsigned().build_and_execute(|| {
+			roll_to_snapshot_created();
+			// mining with `do_reduce = false` vs `true` must strictly shrink the edge count.
+			let full_edges = OffchainWorkerMiner::<Runtime>::mine_solution(Pages::get(), false)
+				.unwrap()
+				.solution_pages
+				.iter()
+				.fold(0, |acc, x| acc + x.edge_count());
+			let reduced_edges = OffchainWorkerMiner::<Runtime>::mine_solution(Pages::get(), true)
+				.unwrap()
+				.solution_pages
+				.iter()
+				.fold(0, |acc, x| acc + x.edge_count());
+
+			assert!(reduced_edges < full_edges, "{} < {} not fulfilled", reduced_edges, full_edges);
+		})
+	}
+
+	#[test]
+	fn trim_backers_per_page_works() {
+		ExtBuilder::unsigned()
+			.max_backers_per_winner(5)
+			.voter_per_page(8)
+			.build_and_execute(|| {
+				// 10 and 40 are the default winners, we add a lot more votes to them.
+				for i in 100..105 {
+					VOTERS.with(|v| v.borrow_mut().push((i, i - 96, vec![10].try_into().unwrap())));
+				}
+				roll_to_snapshot_created();
+
+				ensure_voters(3, 17);
+
+				// now we let the miner mine something for us..
+				let paged = mine_full_solution().unwrap();
+				load_mock_signed_and_start(paged.clone());
+
+				// this must be correct
+				let supports = roll_to_full_verification();
+
+				// 10 has no more than 5 backings, and from the new voters that we added in this
+				// test, the most staked ones stayed (103, 104) and the rest trimmed.
+				assert_eq!(
+					supports,
+					vec![
+						// 1 backing for 10
+						vec![(10, Support { total: 8, voters: vec![(104, 8)] })],
+						// 2 backings for 10
+						vec![
+							(10, Support { total: 17, voters: vec![(10, 10), (103, 7)] }),
+							(40, Support { total: 40, voters: vec![(40, 40)] })
+						],
+						// 2 backings for 10
+						vec![
+							(10, Support { total: 20, voters: vec![(1, 10), (8, 10)] }),
+							(
+								40,
+								Support {
+									total: 40,
+									voters: vec![(2, 10), (3, 10), (5, 10), (6, 10)]
+								}
+							)
+						]
+					]
+					.try_from_unbounded_paged()
+					.unwrap()
+				);
+			})
+	}
+
+	#[test]
+	#[should_panic]
+	fn trim_backers_final_works() {
+		ExtBuilder::unsigned()
+			.max_backers_per_winner_final(3)
+			.pages(3)
+			.build_and_execute(|| {
+				roll_to_snapshot_created();
+
+				let paged = mine_full_solution().unwrap();
+				load_mock_signed_and_start(paged.clone());
+
+				// this must be correct
+				let _supports = roll_to_full_verification();
+
+				assert_eq!(
+					verifier_events(),
+					vec![
+						verifier::Event::Verified(2, 2),
+						verifier::Event::Verified(1, 2),
+						verifier::Event::Verified(0, 2),
+						verifier::Event::VerificationFailed(
+							0,
+							verifier::FeasibilityError::FailedToBoundSupport
+						)
+					]
+				);
+				// the miner does not yet trim for `max_backers_per_winner_final`; this
+				// `todo!` is what makes the test panic (see `#[should_panic]` above).
+				todo!("miner should trim max backers final, maybe");
+
+				// assert_eq!(
+				// 	supports,
+				// 	vec![
+				// 		// 1 backing for 10
+				// 		vec![(10, Support { total: 8, voters: vec![(104, 8)] })],
+				// 		// 2 backings for 10
+				// 		vec![
+				// 			(10, Support { total: 17, voters: vec![(10, 10), (103, 7)] }),
+				// 			(40, Support { total: 40, voters: vec![(40, 40)] })
+				// 		],
+				// 		// 20 backings for 10
+				// 		vec![
+				// 			(10, Support { total: 20, voters: vec![(1, 10), (8, 10)] }),
+				// 			(
+				// 				40,
+				// 				Support {
+				// 					total: 40,
+				// 					voters: vec![(2, 10), (3, 10), (4, 10), (6, 10)]
+				// 				}
+				// 			)
+				// 		]
+				// 	]
+				// 	.try_from_unbounded_paged()
+				// 	.unwrap()
+				// );
+			});
+	}
+}
+
+#[cfg(test)]
+mod offchain_worker_miner {
+	use crate::{verifier::Verifier, CommonError};
+	use frame_support::traits::Hooks;
+	use sp_runtime::offchain::storage_lock::{BlockAndTime, StorageLock};
+
+	use super::*;
+	use crate::mock::*;
+
+	#[test]
+	fn lock_prevents_frequent_execution() {
+		let (mut ext, _) = ExtBuilder::unsigned().build_offchainify();
+		ext.execute_with_sanity_checks(|| {
+			let offchain_repeat = <Runtime as crate::unsigned::Config>::OffchainRepeat::get();
+
+			// first execution -- okay.
+			assert!(OffchainWorkerMiner::<Runtime>::ensure_offchain_repeat_frequency(25).is_ok());
+
+			// next block: rejected.
+			assert_noop!(
+				OffchainWorkerMiner::<Runtime>::ensure_offchain_repeat_frequency(26),
+				OffchainMinerError::Lock("recently executed.")
+			);
+
+			// allowed after `OFFCHAIN_REPEAT`
+			assert!(OffchainWorkerMiner::<Runtime>::ensure_offchain_repeat_frequency(
+				(26 + offchain_repeat).into()
+			)
+			.is_ok());
+
+			// a fork like situation: re-execute last 3.
+			assert!(OffchainWorkerMiner::<Runtime>::ensure_offchain_repeat_frequency(
+				(26 + offchain_repeat - 3).into()
+			)
+			.is_err());
+			assert!(OffchainWorkerMiner::<Runtime>::ensure_offchain_repeat_frequency(
+				(26 + offchain_repeat - 2).into()
+			)
+			.is_err());
+			assert!(OffchainWorkerMiner::<Runtime>::ensure_offchain_repeat_frequency(
+				(26 + offchain_repeat - 1).into()
+			)
+			.is_err());
+		})
+	}
+
+	#[test]
+	fn lock_released_after_successful_execution() {
+		// first, ensure that a successful execution releases the lock
+		let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify();
+		ext.execute_with_sanity_checks(|| {
+			let guard = StorageValueRef::persistent(&OffchainWorkerMiner::<Runtime>::OFFCHAIN_LOCK);
+			let last_block =
+				StorageValueRef::persistent(&OffchainWorkerMiner::<Runtime>::OFFCHAIN_LAST_BLOCK);
+
+			roll_to(25);
+			assert!(MultiBlock::current_phase().is_unsigned());
+
+			// initially, the lock is not set.
+			assert!(guard.get::<bool>().unwrap().is_none());
+
+			// a successful a-z execution.
+			UnsignedPallet::offchain_worker(25);
+			assert_eq!(pool.read().transactions.len(), 1);
+
+			// afterwards, the lock is not set either..
+			assert!(guard.get::<bool>().unwrap().is_none());
+			assert_eq!(last_block.get::<BlockNumber>().unwrap(), Some(25));
+		});
+	}
+
+	#[test]
+	fn lock_prevents_overlapping_execution() {
+		// ensure that if the guard is held, a new execution is not allowed.
+		let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify();
+		ext.execute_with_sanity_checks(|| {
+			roll_to(25);
+			assert!(MultiBlock::current_phase().is_unsigned());
+
+			// artificially set the value, as if another thread is mid-way.
+			let mut lock = StorageLock::<BlockAndTime<System>>::with_block_deadline(
+				OffchainWorkerMiner::<Runtime>::OFFCHAIN_LOCK,
+				UnsignedPhase::get().saturated_into(),
+			);
+			let guard = lock.lock();
+
+			// nothing submitted.
+			UnsignedPallet::offchain_worker(25);
+			assert_eq!(pool.read().transactions.len(), 0);
+			UnsignedPallet::offchain_worker(26);
+			assert_eq!(pool.read().transactions.len(), 0);
+
+			drop(guard);
+
+			// 🎉 !
+			UnsignedPallet::offchain_worker(25);
+			assert_eq!(pool.read().transactions.len(), 1);
+		});
+	}
+
+	#[test]
+	fn initial_ocw_runs_and_saves_new_cache() {
+		let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify();
+		ext.execute_with_sanity_checks(|| {
+			roll_to(25);
+			assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(25));
+
+			let last_block =
+				StorageValueRef::persistent(&OffchainWorkerMiner::<Runtime>::OFFCHAIN_LAST_BLOCK);
+			let cache =
+				StorageValueRef::persistent(&OffchainWorkerMiner::<Runtime>::OFFCHAIN_CACHED_CALL);
+
+			assert_eq!(last_block.get::<BlockNumber>(), Ok(None));
+			assert_eq!(cache.get::<crate::unsigned::Call<Runtime>>(), Ok(None));
+
+			// creates, caches, submits without expecting previous cache value
+			UnsignedPallet::offchain_worker(25);
+			assert_eq!(pool.read().transactions.len(), 1);
+
+			assert_eq!(last_block.get::<BlockNumber>(), Ok(Some(25)));
+			assert!(matches!(cache.get::<crate::unsigned::Call<Runtime>>(), Ok(Some(_))));
+		})
+	}
+
+	#[test]
+	fn ocw_pool_submission_works() {
+		let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify();
+		ext.execute_with_sanity_checks(|| {
+			roll_to_with_ocw(25, None);
+			assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(25));
+			// OCW must have submitted now
+
+			let encoded = pool.read().transactions[0].clone();
+			let extrinsic: Extrinsic = codec::Decode::decode(&mut &*encoded).unwrap();
+			let call = extrinsic.function;
+			assert!(matches!(
+				call,
+				crate::mock::RuntimeCall::UnsignedPallet(
+					crate::unsigned::Call::submit_unsigned { .. }
+				)
+			));
+		})
+	}
+
+	#[test]
+	fn resubmits_after_offchain_repeat() {
+		let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify();
+		ext.execute_with_sanity_checks(|| {
+			let offchain_repeat = <Runtime as crate::unsigned::Config>::OffchainRepeat::get();
+			roll_to(25);
+			assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(25));
+
+			assert!(OffchainWorkerMiner::<Runtime>::cached_solution().is_none());
+			// creates, caches, submits without expecting previous cache value
+			UnsignedPallet::offchain_worker(25);
+			assert_eq!(pool.read().transactions.len(), 1);
+			let tx_cache = pool.read().transactions[0].clone();
+			// assume that the tx has been processed
+			pool.try_write().unwrap().transactions.clear();
+
+			// attempts to resubmit the tx after the threshold has expired.
+			UnsignedPallet::offchain_worker(25 + 1 + offchain_repeat);
+			assert_eq!(pool.read().transactions.len(), 1);
+
+			// resubmitted tx is identical to first submission
+			let tx = &pool.read().transactions[0];
+			assert_eq!(&tx_cache, tx);
+		})
+	}
+
+	#[test]
+	fn regenerates_and_resubmits_after_offchain_repeat_if_no_cache() {
+		let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify();
+		ext.execute_with_sanity_checks(|| {
+			let offchain_repeat = <Runtime as crate::unsigned::Config>::OffchainRepeat::get();
+			roll_to(25);
+
+			assert!(OffchainWorkerMiner::<Runtime>::cached_solution().is_none());
+			// creates, caches, submits without expecting previous cache value.
+			UnsignedPallet::offchain_worker(25);
+			assert_eq!(pool.read().transactions.len(), 1);
+			let tx_cache = pool.read().transactions[0].clone();
+			// assume that the tx has been processed
+			pool.try_write().unwrap().transactions.clear();
+
+			// remove the cached submitted tx.
+			// this ensures that when the resubmit window rolls around, we're ready to regenerate
+			// from scratch if necessary
+			let mut call_cache =
+				StorageValueRef::persistent(&OffchainWorkerMiner::<Runtime>::OFFCHAIN_CACHED_CALL);
+			assert!(matches!(call_cache.get::<crate::unsigned::Call<Runtime>>(), Ok(Some(_))));
+			call_cache.clear();
+
+			// attempts to resubmit the tx after the threshold has expired
+			UnsignedPallet::offchain_worker(25 + 1 + offchain_repeat);
+			assert_eq!(pool.read().transactions.len(), 1);
+
+			// resubmitted tx is identical to first submission
+			let tx = &pool.read().transactions[0];
+			assert_eq!(&tx_cache, tx);
+		})
+	}
+
+	#[test]
+	fn altering_snapshot_invalidates_solution_cache() {
+		// by infeasible, we mean here that the snapshot fingerprint has changed.
+		let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify();
+		ext.execute_with_sanity_checks(|| {
+			let offchain_repeat = <Runtime as crate::unsigned::Config>::OffchainRepeat::get();
+			roll_to_with_ocw(25, None);
+
+			// something is submitted..
+			assert_eq!(pool.read().transactions.len(), 1);
+			pool.try_write().unwrap().transactions.clear();
+
+			// ..and cached
+			let call_cache =
+				StorageValueRef::persistent(&OffchainWorkerMiner::<Runtime>::OFFCHAIN_CACHED_CALL);
+			assert!(matches!(call_cache.get::<crate::unsigned::Call<Runtime>>(), Ok(Some(_))));
+
+			// now change the snapshot, ofc this is rare in reality. This makes the cached call
+			// infeasible.
+			assert_eq!(crate::Snapshot::<Runtime>::targets().unwrap(), vec![10, 20, 30, 40]);
+			let pre_fingerprint = crate::Snapshot::<Runtime>::fingerprint();
+			crate::Snapshot::<Runtime>::remove_target(0);
+			let post_fingerprint = crate::Snapshot::<Runtime>::fingerprint();
+			assert_eq!(crate::Snapshot::<Runtime>::targets().unwrap(), vec![20, 30, 40]);
+			assert_ne!(pre_fingerprint, post_fingerprint);
+
+			// now run ocw again
+			roll_to_with_ocw(25 + offchain_repeat + 1, None);
+			// nothing is submitted this time..
+			assert_eq!(pool.read().transactions.len(), 0);
+			// .. and the cache is gone.
+			assert_eq!(call_cache.get::<crate::unsigned::Call<Runtime>>(), Ok(None));
+
+			// upon the next run, we re-generate and submit something fresh again.
+			roll_to_with_ocw(25 + offchain_repeat + offchain_repeat + 2, None);
+			assert_eq!(pool.read().transactions.len(), 1);
+			assert!(matches!(call_cache.get::<crate::unsigned::Call<Runtime>>(), Ok(Some(_))));
+		})
+	}
+
+	#[test]
+	fn wont_resubmit_if_weak_score() {
+		// common case, if the score is weak, don't bother with anything, ideally check from the
+		// logs that we don't run feasibility in this call path. Score check must come before.
+		let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify();
+		ext.execute_with_sanity_checks(|| {
+			let offchain_repeat = <Runtime as crate::unsigned::Config>::OffchainRepeat::get();
+			// unfortunately there's no pretty way to run the ocw code such that it generates a
+			// weak, but correct solution. We just write it to cache directly.
+
+			roll_to_with_ocw(25, Some(pool.clone()));
+
+			// something is submitted..
+			assert_eq!(pool.read().transactions.len(), 1);
+
+			// ..and cached
+			let call_cache =
+				StorageValueRef::persistent(&OffchainWorkerMiner::<Runtime>::OFFCHAIN_CACHED_CALL);
+			assert!(matches!(call_cache.get::<crate::unsigned::Call<Runtime>>(), Ok(Some(_))));
+
+			// and replace it with something weak.
+			let weak_solution = raw_paged_from_supports(
+				vec![vec![(40, Support { total: 10, voters: vec![(3, 10)] })]],
+				0,
+			);
+			let weak_call = crate::unsigned::Call::<T>::submit_unsigned {
+				paged_solution: Box::new(weak_solution),
+			};
+			call_cache.set(&weak_call);
+
+			// run again
+			roll_to_with_ocw(25 + offchain_repeat + 1, Some(pool.clone()));
+			// nothing is submitted this time..
+			assert_eq!(pool.read().transactions.len(), 0);
+			// .. and the cache IS STILL THERE!
+			assert!(matches!(call_cache.get::<crate::unsigned::Call<Runtime>>(), Ok(Some(_))));
+		})
+	}
+
+	#[test]
+	fn ocw_submission_e2e_works() {
+		let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify();
+		ext.execute_with_sanity_checks(|| {
+			assert!(VerifierPallet::queued_score().is_none());
+			roll_to_with_ocw(25 + 1, Some(pool.clone()));
+			assert!(VerifierPallet::queued_score().is_some());
+
+			// call is cached.
+			let call_cache =
+				StorageValueRef::persistent(&OffchainWorkerMiner::<Runtime>::OFFCHAIN_CACHED_CALL);
+			assert!(matches!(call_cache.get::<crate::unsigned::Call<Runtime>>(), Ok(Some(_))));
+
+			// pool is empty
+			assert_eq!(pool.read().transactions.len(), 0);
+		})
+	}
+
+	#[test]
+	fn multi_page_ocw_e2e_submits_and_queued_msp_only() {
+		let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify();
+		ext.execute_with_sanity_checks(|| {
+			assert!(VerifierPallet::queued_score().is_none());
+
+			roll_to_with_ocw(25 + 1, Some(pool.clone()));
+
+			assert_eq!(
+				multi_block_events(),
+				vec![
+					crate::Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(2) },
+					crate::Event::PhaseTransitioned {
+						from: Phase::Snapshot(0),
+						to: Phase::Unsigned(25)
+					}
+				]
+			);
+			assert_eq!(
+				verifier_events(),
+				vec![
+					crate::verifier::Event::Verified(2, 2),
+					crate::verifier::Event::Queued(
+						ElectionScore { minimal_stake: 15, sum_stake: 40, sum_stake_squared: 850 },
+						None
+					)
+				]
+			);
+
+			assert!(VerifierPallet::queued_score().is_some());
+
+			// pool is empty
+			assert_eq!(pool.read().transactions.len(), 0);
+		})
+	}
+
+	#[test]
+	fn will_not_mine_if_not_enough_winners() {
+		// also see `trim_weight_too_much_makes_solution_invalid`.
+		let (mut ext, _) = ExtBuilder::unsigned().desired_targets(77).build_offchainify();
+		ext.execute_with_sanity_checks(|| {
+			roll_to_unsigned_open();
+			ensure_voters(3, 12);
+
+			// beautiful errors, isn't it?
+			assert_eq!(
+				OffchainWorkerMiner::<Runtime>::mine_checked_call().unwrap_err(),
+				OffchainMinerError::Common(CommonError::WrongWinnerCount)
+			);
+		});
+	}
+}
diff --git a/substrate/frame/election-provider-multi-block/src/unsigned/mod.rs b/substrate/frame/election-provider-multi-block/src/unsigned/mod.rs
new file mode 100644
index 00000000000..ca6766efd90
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/unsigned/mod.rs
@@ -0,0 +1,633 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! ## The unsigned phase, and its miner.
+//!
+//! This pallet deals with unsigned submissions. These are backup, single page submissions from
+//! validators.
+//!
+//! This pallet has two miners:
+//!
+//! * [`unsigned::miner::BaseMiner`], which is the basis of how the mining works. It can be used by
+//!   a separate crate by providing an implementation of [`unsigned::miner::MinerConfig`]. And, it
+//!   is used in:
+//! * `Miner::OffchainWorkerMiner`, which is a specialized miner for the single page mining by
+//!   validators in the `offchain_worker` hook.
+//!
+//! ## Future Idea: Multi-Page unsigned submission
+//!
+//! the following is the idea of how to implement multi-page unsigned, which we don't have.
+//!
+//! ## Multi-block unsigned submission
+//!
+//! The process of allowing validators to coordinate to submit a multi-page solution is new to this
+//! pallet, and non-existent in the multi-phase pallet. The process is as follows:
+//!
+//! All validators will run their miners and compute the full paginated solution. They submit all
+//! pages as individual unsigned transactions to their local tx-pool.
+//!
+//! Upon validation, if any page is already present, the corresponding transaction is dropped.
+//!
+//! At each block, the first page that may be valid is included as a high priority operational
+//! transaction. This page is validated on the fly to be correct. Since this transaction is sourced
+//! from a validator, we can panic if they submit an invalid transaction.
+//!
+//! Then, once the final page is submitted, some extra checks are done, as explained in
+//! [`crate::verifier`]:
+//!
+//! 1. bounds
+//! 2. total score
+//!
+//! These checks might still fail. If they do, the solution is dropped. At this point, we don't know
+//! which validator may have submitted a slightly-faulty solution.
+//!
+//! In order to prevent this, the validation process always includes a check to ensure all of the
+//! previous pages that have been submitted match what the local validator has computed. If they
+//! match, the validator knows that they are putting skin in a game that is valid.
+//!
+//! If any bad pages are detected, the next validator can bail. This process means:
+//!
+//! * As long as all validators are honest, and run the same miner code, a correct solution is
+//!   found.
+//! * As little as one malicious validator can stall the process, but no one is accidentally
+//!   slashed, and no panic happens.
+//!
+//! A future improvement should keep track of submitters, and report a slash if it occurs. Or, if
+//! the signed process is bullet-proof, we can be okay with the status quo.
+
+/// Export weights
+pub use crate::weights::measured::pallet_election_provider_multi_block_unsigned::*;
+/// Exports of this pallet
+pub use pallet::*;
+#[cfg(feature = "runtime-benchmarks")]
+mod benchmarking;
+
+/// The miner.
+pub mod miner;
+
+#[frame_support::pallet]
+mod pallet {
+	use super::WeightInfo;
+	use crate::{
+		types::*,
+		unsigned::miner::{self},
+		verifier::Verifier,
+		CommonError,
+	};
+	use frame_support::pallet_prelude::*;
+	use frame_system::{offchain::CreateInherent, pallet_prelude::*};
+	use sp_runtime::traits::SaturatedConversion;
+	use sp_std::prelude::*;
+
+	/// convert a [`crate::CommonError`] to a custom InvalidTransaction with the inner code being
+	/// the index of the variant.
+	fn base_error_to_invalid(error: CommonError) -> InvalidTransaction {
+		let index = error.encode().pop().unwrap_or(0);
+		InvalidTransaction::Custom(index)
+	}
+
+	pub(crate) type UnsignedWeightsOf<T> = <T as Config>::WeightInfo;
+
+	#[pallet::config]
+	#[pallet::disable_frame_system_supertrait_check]
+	pub trait Config: crate::Config + CreateInherent<Call<Self>> {
+		/// The repeat threshold of the offchain worker.
+		///
+		/// For example, if it is 5, that means that at least 5 blocks will elapse between attempts
+		/// to submit the worker's solution.
+		type OffchainRepeat: Get<BlockNumberFor<Self>>;
+
+		/// The solver used in the offchain worker miner.
+		type OffchainSolver: frame_election_provider_support::NposSolver<
+			AccountId = Self::AccountId,
+		>;
+
+		/// The priority of the unsigned transaction submitted in the unsigned-phase
+		type MinerTxPriority: Get<TransactionPriority>;
+
+		/// Runtime weight information of this pallet.
+		type WeightInfo: WeightInfo;
+	}
+
+	#[pallet::pallet]
+	pub struct Pallet<T>(PhantomData<T>);
+
+	#[pallet::call]
+	impl<T: Config> Pallet<T> {
+		/// Submit an unsigned solution.
+		///
+		/// This works very much like an inherent, as only the validators are permitted to submit
+		/// anything. By default validators will compute this call in their `offchain_worker` hook
+		/// and try and submit it back.
+		///
+		/// This is different from signed page submission mainly in that the solution page is
+		/// verified on the fly.
+		#[pallet::weight((UnsignedWeightsOf::<T>::submit_unsigned(), DispatchClass::Operational))]
+		#[pallet::call_index(0)]
+		pub fn submit_unsigned(
+			origin: OriginFor<T>,
+			paged_solution: Box<PagedRawSolution<T::MinerConfig>>,
+		) -> DispatchResultWithPostInfo {
+			ensure_none(origin)?;
+			// TODO: remove the panic from this function for now.
+			let error_message = "Invalid unsigned submission must produce invalid block and \
+				 deprive validator from their authoring reward.";
+
+			// phase, round, claimed score, page-count and hash are checked in pre-dispatch. we
+			// don't check them here anymore.
+			debug_assert!(Self::validate_unsigned_checks(&paged_solution).is_ok());
+
+			let only_page = paged_solution
+				.solution_pages
+				.into_inner()
+				.pop()
+				.expect("length of `solution_pages` is always `1`, can be popped; qed.");
+			let claimed_score = paged_solution.score;
+			// `verify_synchronous` will internally queue and save the solution, we don't need to do
+			// it.
+			let _supports = <T::Verifier as Verifier>::verify_synchronous(
+				only_page,
+				claimed_score,
+				// must be valid against the msp
+				crate::Pallet::<T>::msp(),
+			)
+			.expect(error_message);
+
+			sublog!(
+				info,
+				"unsigned",
+				"queued an unsigned solution with score {:?} and {} winners",
+				claimed_score,
+				_supports.len()
+			);
+
+			Ok(None.into())
+		}
+	}
+
+	#[pallet::validate_unsigned]
+	impl<T: Config> ValidateUnsigned for Pallet<T> {
+		type Call = Call<T>;
+		fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity {
+			if let Call::submit_unsigned { paged_solution, .. } = call {
+				match source {
+					TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ },
+					_ => return InvalidTransaction::Call.into(),
+				}
+
+				let _ = Self::validate_unsigned_checks(paged_solution.as_ref())
+					.map_err(|err| {
+						sublog!(
+							debug,
+							"unsigned",
+							"unsigned transaction validation failed due to {:?}",
+							err
+						);
+						err
+					})
+					.map_err(base_error_to_invalid)?;
+
+				ValidTransaction::with_tag_prefix("OffchainElection")
+					// The higher the score.minimal_stake, the better a paged_solution is.
+					.priority(
+						T::MinerTxPriority::get()
+							.saturating_add(paged_solution.score.minimal_stake.saturated_into()),
+					)
+					// Used to deduplicate unsigned solutions: each validator should produce one
+					// paged_solution per round at most, and solutions are not propagated.
+					.and_provides(paged_solution.round)
+					// Transaction should stay in the pool for the duration of the unsigned phase.
+					.longevity(T::UnsignedPhase::get().saturated_into::<u64>())
+					// We don't propagate this. This can never be validated at a remote node.
+					.propagate(false)
+					.build()
+			} else {
+				InvalidTransaction::Call.into()
+			}
+		}
+
+		fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> {
+			if let Call::submit_unsigned { paged_solution, .. } = call {
+				Self::validate_unsigned_checks(paged_solution.as_ref())
+					.map_err(base_error_to_invalid)
+					.map_err(Into::into)
+			} else {
+				Err(InvalidTransaction::Call.into())
+			}
+		}
+	}
+
+	#[pallet::hooks]
+	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
+		fn integrity_test() {
+			assert!(
+				UnsignedWeightsOf::<T>::submit_unsigned().all_lte(T::BlockWeights::get().max_block),
+				"weight of `submit_unsigned` is too high"
+			)
+		}
+
+		#[cfg(feature = "try-runtime")]
+		fn try_state(now: BlockNumberFor<T>) -> Result<(), sp_runtime::TryRuntimeError> {
+			Self::do_try_state(now)
+		}
+
+		fn offchain_worker(now: BlockNumberFor<T>) {
+			use sp_runtime::offchain::storage_lock::{BlockAndTime, StorageLock};
+
+			// Create a lock with the maximum deadline of number of blocks in the unsigned phase.
+			// This should only come useful in an **abrupt** termination of execution, otherwise the
+			// guard will be dropped upon successful execution.
+			let mut lock =
+				StorageLock::<BlockAndTime<frame_system::Pallet<T>>>::with_block_deadline(
+					miner::OffchainWorkerMiner::<T>::OFFCHAIN_LOCK,
+					T::UnsignedPhase::get().saturated_into(),
+				);
+
+			match lock.try_lock() {
+				Ok(_guard) => {
+					Self::do_synchronized_offchain_worker(now);
+				},
+				Err(deadline) => {
+					sublog!(
+						debug,
+						"unsigned",
+						"offchain worker lock not released, deadline is {:?}",
+						deadline
+					);
+				},
+			};
+		}
+	}
+
+	impl<T: Config> Pallet<T> {
+		/// Internal logic of the offchain worker, to be executed only when the offchain lock is
+		/// acquired with success.
+		fn do_synchronized_offchain_worker(now: BlockNumberFor<T>) {
+			use miner::OffchainWorkerMiner;
+
+			let current_phase = crate::Pallet::<T>::current_phase();
+			sublog!(
+				trace,
+				"unsigned",
+				"lock for offchain worker acquired. Phase = {:?}",
+				current_phase
+			);
+			match current_phase {
+				Phase::Unsigned(opened) if opened == now => {
+					// Mine a new solution, cache it, and attempt to submit it
+					let initial_output =
+						OffchainWorkerMiner::<T>::ensure_offchain_repeat_frequency(now)
+							.and_then(|_| OffchainWorkerMiner::<T>::mine_check_save_submit());
+					sublog!(
+						debug,
+						"unsigned",
+						"initial offchain worker output: {:?}",
+						initial_output
+					);
+				},
+				Phase::Unsigned(opened) if opened < now => {
+					// Try and resubmit the cached solution, and recompute ONLY if it is not
+					// feasible.
+					let resubmit_output =
+						OffchainWorkerMiner::<T>::ensure_offchain_repeat_frequency(now).and_then(
+							|_| OffchainWorkerMiner::<T>::restore_or_compute_then_maybe_submit(),
+						);
+					sublog!(
+						debug,
+						"unsigned",
+						"resubmit offchain worker output: {:?}",
+						resubmit_output
+					);
+				},
+				_ => {},
+			}
+		}
+
+		/// The checks that should happen in the `ValidateUnsigned`'s `pre_dispatch` and
+		/// `validate_unsigned` functions.
+		///
+		/// These check both for snapshot independent checks, and some checks that are specific to
+		/// the unsigned phase.
+		pub(crate) fn validate_unsigned_checks(
+			paged_solution: &PagedRawSolution<T::MinerConfig>,
+		) -> Result<(), CommonError> {
+			Self::unsigned_specific_checks(paged_solution)
+				.and(crate::Pallet::<T>::snapshot_independent_checks(paged_solution, None))
+				.map_err(Into::into)
+		}
+
+		/// The checks that are specific to the (this) unsigned pallet.
+		///
+		/// ensure solution has the correct phase, and it has only 1 page.
+		pub fn unsigned_specific_checks(
+			paged_solution: &PagedRawSolution<T::MinerConfig>,
+		) -> Result<(), CommonError> {
+			ensure!(
+				crate::Pallet::<T>::current_phase().is_unsigned(),
+				CommonError::EarlySubmission
+			);
+			ensure!(paged_solution.solution_pages.len() == 1, CommonError::WrongPageCount);
+
+			Ok(())
+		}
+
+		#[cfg(any(test, feature = "runtime-benchmarks", feature = "try-runtime"))]
+		pub(crate) fn do_try_state(
+			_now: BlockNumberFor<T>,
+		) -> Result<(), sp_runtime::TryRuntimeError> {
+			Ok(())
+		}
+	}
+}
+
+#[cfg(test)]
+mod validate_unsigned {
+	use frame_election_provider_support::Support;
+	use frame_support::{
+		pallet_prelude::InvalidTransaction,
+		unsigned::{TransactionSource, TransactionValidityError, ValidateUnsigned},
+	};
+
+	use super::Call;
+	use crate::{mock::*, types::*, verifier::Verifier};
+
+	#[test]
+	fn retracts_weak_score_accepts_threshold_better() {
+		ExtBuilder::unsigned()
+			.solution_improvement_threshold(sp_runtime::Perbill::from_percent(10))
+			.build_and_execute(|| {
+				roll_to_snapshot_created();
+
+				let solution = mine_full_solution().unwrap();
+				load_mock_signed_and_start(solution.clone());
+				roll_to_full_verification();
+
+				// Some good solution is queued now.
+				assert_eq!(
+					<VerifierPallet as Verifier>::queued_score(),
+					Some(ElectionScore {
+						minimal_stake: 55,
+						sum_stake: 130,
+						sum_stake_squared: 8650
+					})
+				);
+
+				roll_to_unsigned_open();
+
+				// this is just worse
+				let attempt =
+					fake_solution(ElectionScore { minimal_stake: 20, ..Default::default() });
+				let call = Call::submit_unsigned { paged_solution: Box::new(attempt) };
+				assert_eq!(
+					UnsignedPallet::validate_unsigned(TransactionSource::Local, &call).unwrap_err(),
+					TransactionValidityError::Invalid(InvalidTransaction::Custom(2)),
+				);
+
+				// this is better, but not enough better.
+				let insufficient_improvement = 55 * 105 / 100;
+				let attempt = fake_solution(ElectionScore {
+					minimal_stake: insufficient_improvement,
+					..Default::default()
+				});
+				let call = Call::submit_unsigned { paged_solution: Box::new(attempt) };
+				assert_eq!(
+					UnsignedPallet::validate_unsigned(TransactionSource::Local, &call).unwrap_err(),
+					TransactionValidityError::Invalid(InvalidTransaction::Custom(2)),
+				);
+
+				// note that we now have to use a solution with 2 winners, just to pass all of the
+				// snapshot independent checks.
+				let mut paged = raw_paged_from_supports(
+					vec![vec![
+						(40, Support { total: 10, voters: vec![(3, 5)] }),
+						(30, Support { total: 10, voters: vec![(3, 5)] }),
+					]],
+					0,
+				);
+				let sufficient_improvement = 55 * 115 / 100;
+				paged.score =
+					ElectionScore { minimal_stake: sufficient_improvement, ..Default::default() };
+				let call = Call::submit_unsigned { paged_solution: Box::new(paged) };
+				assert!(UnsignedPallet::validate_unsigned(TransactionSource::Local, &call).is_ok());
+			})
+	}
+
+	#[test]
+	fn retracts_wrong_round() {
+		ExtBuilder::unsigned().build_and_execute(|| {
+			roll_to_unsigned_open();
+
+			let mut attempt =
+				fake_solution(ElectionScore { minimal_stake: 5, ..Default::default() });
+			attempt.round += 1;
+			let call = Call::submit_unsigned { paged_solution: Box::new(attempt) };
+
+			assert_eq!(
+				UnsignedPallet::validate_unsigned(TransactionSource::Local, &call).unwrap_err(),
+				// WrongRound is index 1
+				TransactionValidityError::Invalid(InvalidTransaction::Custom(1)),
+			);
+		})
+	}
+
+	#[test]
+	fn retracts_too_many_pages_unsigned() {
+		ExtBuilder::unsigned().build_and_execute(|| {
+			// NOTE: unsigned solutions should have just 1 page, regardless of the configured
+			// page count.
+			roll_to_unsigned_open();
+			let attempt = mine_full_solution().unwrap();
+			let call = Call::submit_unsigned { paged_solution: Box::new(attempt) };
+
+			assert_eq!(
+				UnsignedPallet::validate_unsigned(TransactionSource::Local, &call).unwrap_err(),
+				// WrongPageCount is index 3
+				TransactionValidityError::Invalid(InvalidTransaction::Custom(3)),
+			);
+
+			let attempt = mine_solution(2).unwrap();
+			let call = Call::submit_unsigned { paged_solution: Box::new(attempt) };
+
+			assert_eq!(
+				UnsignedPallet::validate_unsigned(TransactionSource::Local, &call).unwrap_err(),
+				TransactionValidityError::Invalid(InvalidTransaction::Custom(3)),
+			);
+
+			let attempt = mine_solution(1).unwrap();
+			let call = Call::submit_unsigned { paged_solution: Box::new(attempt) };
+
+			assert!(UnsignedPallet::validate_unsigned(TransactionSource::Local, &call).is_ok(),);
+		})
+	}
+
+	#[test]
+	fn retracts_wrong_winner_count() {
+		ExtBuilder::unsigned().desired_targets(2).build_and_execute(|| {
+			roll_to_unsigned_open();
+
+			let paged = raw_paged_from_supports(
+				vec![vec![(40, Support { total: 10, voters: vec![(3, 10)] })]],
+				0,
+			);
+
+			let call = Call::submit_unsigned { paged_solution: Box::new(paged) };
+
+			assert_eq!(
+				UnsignedPallet::validate_unsigned(TransactionSource::Local, &call).unwrap_err(),
+				// WrongWinnerCount is index 4
+				TransactionValidityError::Invalid(InvalidTransaction::Custom(4)),
+			);
+		});
+	}
+
+	#[test]
+	fn retracts_wrong_phase() {
+		ExtBuilder::unsigned().signed_phase(5, 0).build_and_execute(|| {
+			let solution = raw_paged_solution_low_score();
+			let call = Call::submit_unsigned { paged_solution: Box::new(solution.clone()) };
+
+			// initial
+			assert_eq!(MultiBlock::current_phase(), Phase::Off);
+			assert!(matches!(
+				<UnsignedPallet as ValidateUnsigned>::validate_unsigned(
+					TransactionSource::Local,
+					&call
+				)
+				.unwrap_err(),
+				// because EarlySubmission is index 0.
+				TransactionValidityError::Invalid(InvalidTransaction::Custom(0))
+			));
+			assert!(matches!(
+				<UnsignedPallet as ValidateUnsigned>::pre_dispatch(&call).unwrap_err(),
+				TransactionValidityError::Invalid(InvalidTransaction::Custom(0))
+			));
+
+			// signed
+			roll_to(20);
+			assert_eq!(MultiBlock::current_phase(), Phase::Signed);
+			assert!(matches!(
+				<UnsignedPallet as ValidateUnsigned>::validate_unsigned(
+					TransactionSource::Local,
+					&call
+				)
+				.unwrap_err(),
+				TransactionValidityError::Invalid(InvalidTransaction::Custom(0))
+			));
+			assert!(matches!(
+				<UnsignedPallet as ValidateUnsigned>::pre_dispatch(&call).unwrap_err(),
+				TransactionValidityError::Invalid(InvalidTransaction::Custom(0))
+			));
+
+			// unsigned
+			roll_to(25);
+			assert!(MultiBlock::current_phase().is_unsigned());
+
+			assert_ok!(<UnsignedPallet as ValidateUnsigned>::validate_unsigned(
+				TransactionSource::Local,
+				&call
+			));
+			assert_ok!(<UnsignedPallet as ValidateUnsigned>::pre_dispatch(&call));
+		})
+	}
+
+	#[test]
+	fn priority_is_set() {
+		ExtBuilder::unsigned()
+			.miner_tx_priority(20)
+			.desired_targets(0)
+			.build_and_execute(|| {
+				roll_to(25);
+				assert!(MultiBlock::current_phase().is_unsigned());
+
+				let solution =
+					fake_solution(ElectionScore { minimal_stake: 5, ..Default::default() });
+				let call = Call::submit_unsigned { paged_solution: Box::new(solution.clone()) };
+
+				assert_eq!(
+					<UnsignedPallet as ValidateUnsigned>::validate_unsigned(
+						TransactionSource::Local,
+						&call
+					)
+					.unwrap()
+					.priority,
+					25
+				);
+			})
+	}
+}
+
+#[cfg(test)]
+mod call {
+	use crate::{mock::*, verifier::Verifier, Snapshot};
+
+	#[test]
+	fn unsigned_submission_e2e() {
+		let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify();
+		ext.execute_with_sanity_checks(|| {
+			roll_to_snapshot_created();
+
+			// snapshot is created..
+			assert_full_snapshot();
+			// ..txpool is empty..
+			assert_eq!(pool.read().transactions.len(), 0);
+			// ..but nothing queued.
+			assert_eq!(<VerifierPallet as Verifier>::queued_score(), None);
+
+			// now the OCW should submit something.
+			roll_next_with_ocw(Some(pool.clone()));
+			assert_eq!(pool.read().transactions.len(), 1);
+			assert_eq!(<VerifierPallet as Verifier>::queued_score(), None);
+
+			// and now it should be applied.
+			roll_next_with_ocw(Some(pool.clone()));
+			assert_eq!(pool.read().transactions.len(), 0);
+			assert!(matches!(<VerifierPallet as Verifier>::queued_score(), Some(_)));
+		})
+	}
+
+	#[test]
+	#[should_panic(
+		expected = "Invalid unsigned submission must produce invalid block and deprive validator from their authoring reward."
+	)]
+	fn unfeasible_solution_panics() {
+		let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify();
+		ext.execute_with_sanity_checks(|| {
+			roll_to_snapshot_created();
+
+			// snapshot is created..
+			assert_full_snapshot();
+			// ..txpool is empty..
+			assert_eq!(pool.read().transactions.len(), 0);
+			// ..but nothing queued.
+			assert_eq!(<VerifierPallet as Verifier>::queued_score(), None);
+
+			// now the OCW should submit something.
+			roll_next_with_ocw(Some(pool.clone()));
+			assert_eq!(pool.read().transactions.len(), 1);
+			assert_eq!(<VerifierPallet as Verifier>::queued_score(), None);
+
+			// now we change the snapshot -- this should ensure that the solution becomes invalid.
+			// Note that we don't change the known fingerprint of the solution.
+			Snapshot::<Runtime>::remove_target(2);
+
+			// and now it should be applied.
+			roll_next_with_ocw(Some(pool.clone()));
+			assert_eq!(pool.read().transactions.len(), 0);
+			assert!(matches!(<VerifierPallet as Verifier>::queued_score(), Some(_)));
+		})
+	}
+}
diff --git a/substrate/frame/election-provider-multi-block/src/verifier/benchmarking.rs b/substrate/frame/election-provider-multi-block/src/verifier/benchmarking.rs
new file mode 100644
index 00000000000..4884d24d151
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/verifier/benchmarking.rs
@@ -0,0 +1,234 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::{
+	verifier::{Config, Event, FeasibilityError, Pallet, Status, StatusStorage},
+	CurrentPhase, Phase,
+};
+use frame_benchmarking::v2::*;
+use frame_election_provider_support::{ElectionDataProvider, NposSolution};
+use frame_support::pallet_prelude::*;
+use sp_std::prelude::*;
+
+#[benchmarks(where T: crate::Config + crate::signed::Config + crate::unsigned::Config)]
+mod benchmarks {
+	use super::*;
+
+	// TODO: this is the epitome of bad DevEx because of generics.. create a nice one that works in
+	// frame_system.
+	/// Collect all events of this pallet deposited so far.
+	///
+	/// Filters the full `frame_system` event buffer down to the events that are convertible into
+	/// this pallet's local [`Event`].
+	fn events_for<T: Config>() -> Vec<Event<T>> {
+		frame_system::Pallet::<T>::events()
+			.into_iter()
+			.map(|e| e.event) // convert to inner event
+			.filter_map(|e| {
+				let e = <T as Config>::RuntimeEvent::from_ref(&e);
+				if let Ok(ev) =
+					<<T as Config>::RuntimeEvent as TryInto<Event<T>>>::try_into((*e).clone())
+				{
+					Some(ev)
+				} else {
+					None
+				}
+			})
+			.collect()
+	}
+
+	// Benchmark a single successful page verification that is NOT the last page of the solution.
+	#[benchmark]
+	fn on_initialize_valid_non_terminal() -> Result<(), BenchmarkError> {
+		// roll to signed validation, with a solution stored in the signed pallet
+		T::DataProvider::set_next_election(crate::Pallet::<T>::reasonable_next_election());
+
+		crate::Pallet::<T>::roll_to_signed_and_submit_full_solution();
+		// roll to verification
+		crate::Pallet::<T>::roll_until_matches(|| {
+			matches!(CurrentPhase::<T>::get(), Phase::SignedValidation(_))
+		});
+
+		// start signal must have been sent by now
+		assert_eq!(StatusStorage::<T>::get(), Status::Ongoing(crate::Pallet::<T>::msp()));
+
+		#[block]
+		{
+			crate::Pallet::<T>::roll_next(true, false);
+		}
+		// one page is verified, and the cursor has moved one page down, towards the lsp.
+		assert_eq!(StatusStorage::<T>::get(), Status::Ongoing(crate::Pallet::<T>::msp() - 1));
+
+		Ok(())
+	}
+
+	// Benchmark the final (last-page) verification step of a fully valid solution, which also
+	// finalizes the whole verification and queues the solution.
+	#[benchmark]
+	fn on_initialize_valid_terminal() -> Result<(), BenchmarkError> {
+		// roll to signed validation, with a solution stored in the signed pallet
+		T::DataProvider::set_next_election(crate::Pallet::<T>::reasonable_next_election());
+		assert!(
+			T::SignedValidationPhase::get() >= T::Pages::get().into(),
+			"Signed validation phase must be larger than the number of pages"
+		);
+
+		crate::Pallet::<T>::roll_to_signed_and_submit_full_solution();
+		// roll to before the last page of verification
+		crate::Pallet::<T>::roll_until_matches(|| {
+			matches!(CurrentPhase::<T>::get(), Phase::SignedValidation(_))
+		});
+		// start signal must have been sent by now
+		assert_eq!(StatusStorage::<T>::get(), Status::Ongoing(crate::Pallet::<T>::msp()));
+		for _ in 0..(T::Pages::get() - 1) {
+			crate::Pallet::<T>::roll_next(true, false);
+		}
+
+		// we must have verified all pages by now, minus the last one.
+		assert!(matches!(
+			&events_for::<T>()[..],
+			[Event::Verified(_, _), .., Event::Verified(1, _)]
+		));
+
+		// verify the last page.
+		#[block]
+		{
+			crate::Pallet::<T>::roll_next(true, false);
+		}
+
+		// we are done
+		assert_eq!(StatusStorage::<T>::get(), Status::Nothing);
+		// last event is success
+		assert!(matches!(
+			&events_for::<T>()[..],
+			[Event::Verified(_, _), .., Event::Verified(0, _), Event::Queued(_, None)]
+		));
+
+		Ok(())
+	}
+
+	// Benchmark the last verification step of an overall-invalid solution: the final page itself
+	// verifies, but the final score check fails, so all previously stored valid pages must be
+	// removed as well.
+	#[benchmark]
+	fn on_initialize_invalid_terminal() -> Result<(), BenchmarkError> {
+		// this is the verification of the current page + removing all of the previously valid
+		// pages. The worst case is therefore when the last page is invalid, for example the final
+		// score.
+		assert!(T::Pages::get() >= 2, "benchmark only works if we have at least 2 pages");
+
+		// roll to signed validation, with a solution stored in the signed pallet
+		T::DataProvider::set_next_election(crate::Pallet::<T>::reasonable_next_election());
+
+		// but this solution is corrupt: the claimed score is lowered, so the final score check
+		// will fail with `InvalidScore`.
+		let mut paged_solution = crate::Pallet::<T>::roll_to_signed_and_mine_full_solution();
+		paged_solution.score.minimal_stake -= 1;
+		crate::Pallet::<T>::submit_full_solution(paged_solution);
+
+		// roll to verification
+		crate::Pallet::<T>::roll_until_matches(|| {
+			matches!(CurrentPhase::<T>::get(), Phase::SignedValidation(_))
+		});
+
+		assert_eq!(StatusStorage::<T>::get(), Status::Ongoing(crate::Pallet::<T>::msp()));
+		// verify all pages, except for the last one.
+		for i in 0..T::Pages::get() - 1 {
+			crate::Pallet::<T>::roll_next(true, false);
+			assert_eq!(
+				StatusStorage::<T>::get(),
+				Status::Ongoing(crate::Pallet::<T>::msp() - 1 - i)
+			);
+		}
+
+		// next page to be verified is the last one
+		assert_eq!(StatusStorage::<T>::get(), Status::Ongoing(crate::Pallet::<T>::lsp()));
+		assert!(matches!(
+			&events_for::<T>()[..],
+			[Event::Verified(_, _), .., Event::Verified(1, _)]
+		));
+
+		#[block]
+		{
+			crate::Pallet::<T>::roll_next(true, false);
+		}
+
+		// we are now reset.
+		assert_eq!(StatusStorage::<T>::get(), Status::Nothing);
+		assert!(matches!(
+			&events_for::<T>()[..],
+			[
+				..,
+				Event::Verified(0, _),
+				Event::VerificationFailed(0, FeasibilityError::InvalidScore)
+			]
+		));
+
+		Ok(())
+	}
+
+	// Benchmark a failing verification of a non-terminal page, parameterized by how many valid
+	// pages (`v`) were stored before hitting the invalid one; all of them are wiped on failure.
+	#[benchmark]
+	fn on_initialize_invalid_non_terminal(
+		// number of valid pages that have been verified, before we verify the non-terminal invalid
+		// page.
+		v: Linear<0, { T::Pages::get() - 1 }>,
+	) -> Result<(), BenchmarkError> {
+		assert!(T::Pages::get() >= 2, "benchmark only works if we have at least 2 pages");
+
+		T::DataProvider::set_next_election(crate::Pallet::<T>::reasonable_next_election());
+
+		// roll to signed validation, with a solution stored in the signed pallet, but this solution
+		// is corrupt in its msp.
+		let mut paged_solution = crate::Pallet::<T>::roll_to_signed_and_mine_full_solution();
+		let page_to_corrupt = crate::Pallet::<T>::msp() - v;
+		crate::log!(
+			info,
+			"pages of solution: {:?}, to corrupt {}, v {}",
+			paged_solution.solution_pages.len(),
+			page_to_corrupt,
+			v
+		);
+		paged_solution.solution_pages[page_to_corrupt as usize].corrupt();
+		crate::Pallet::<T>::submit_full_solution(paged_solution);
+
+		// roll to verification
+		crate::Pallet::<T>::roll_until_matches(|| {
+			matches!(CurrentPhase::<T>::get(), Phase::SignedValidation(_))
+		});
+
+		// we should be ready to go
+		assert_eq!(StatusStorage::<T>::get(), Status::Ongoing(crate::Pallet::<T>::msp()));
+
+		// validate the parameterized number of valid pages.
+		for _ in 0..v {
+			crate::Pallet::<T>::roll_next(true, false);
+		}
+
+		// we are still ready to continue
+		assert_eq!(StatusStorage::<T>::get(), Status::Ongoing(crate::Pallet::<T>::msp() - v));
+
+		// verify one page, which will be invalid.
+		#[block]
+		{
+			crate::Pallet::<T>::roll_next(true, false);
+		}
+
+		// we are now reset, because this page was invalid.
+		assert_eq!(StatusStorage::<T>::get(), Status::Nothing);
+
+		assert!(matches!(
+			&events_for::<T>()[..],
+			[.., Event::VerificationFailed(_, FeasibilityError::NposElection(_))]
+		));
+
+		Ok(())
+	}
+
+	// Run all of the above benchmarks as regular unit tests against the mock runtime.
+	impl_benchmark_test_suite!(
+		Pallet,
+		crate::mock::ExtBuilder::full().build_unchecked(),
+		crate::mock::Runtime
+	);
+}
diff --git a/substrate/frame/election-provider-multi-block/src/verifier/impls.rs b/substrate/frame/election-provider-multi-block/src/verifier/impls.rs
new file mode 100644
index 00000000000..0f5f0fb911b
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/verifier/impls.rs
@@ -0,0 +1,955 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::*;
+use crate::{
+	helpers,
+	types::VoterOf,
+	unsigned::miner::{MinerConfig, SupportsOfMiner},
+	verifier::Verifier,
+	SolutionOf,
+};
+use codec::{Decode, Encode, MaxEncodedLen};
+use frame_election_provider_support::{
+	ExtendedBalance, NposSolution, PageIndex, TryFromOtherBounds,
+};
+use frame_support::{
+	ensure,
+	pallet_prelude::{ValueQuery, *},
+	traits::{defensive_prelude::*, Defensive, Get},
+};
+use frame_system::pallet_prelude::*;
+use pallet::*;
+use sp_npos_elections::{evaluate_support, ElectionScore, EvaluateSupport};
+use sp_runtime::Perbill;
+use sp_std::{collections::btree_map::BTreeMap, prelude::*};
+
+/// The bounded supports type, as parameterized by a [`Verifier`] implementation.
+pub(crate) type SupportsOfVerifier<V> = frame_election_provider_support::BoundedSupports<
+	<V as Verifier>::AccountId,
+	<V as Verifier>::MaxWinnersPerPage,
+	<V as Verifier>::MaxBackersPerWinner,
+>;
+
+/// Convenience alias for the weight info declared by this pallet's [`Config`].
+pub(crate) type VerifierWeightsOf<T> = <T as Config>::WeightInfo;
+
+/// The status of this pallet.
+#[derive(
+	Encode, Decode, scale_info::TypeInfo, Clone, Copy, MaxEncodedLen, Debug, PartialEq, Eq,
+)]
+pub enum Status {
+	/// A verification is ongoing, and the next page that will be verified is indicated with the
+	/// inner value.
+	Ongoing(PageIndex),
+	/// Nothing is happening.
+	Nothing,
+}
+
+impl Default for Status {
+	/// By default, no verification is ongoing.
+	fn default() -> Self {
+		Self::Nothing
+	}
+}
+
+/// Enum to point to the valid variant of the [`QueuedSolution`].
+#[derive(Encode, Decode, scale_info::TypeInfo, Clone, Copy, MaxEncodedLen)]
+enum ValidSolution {
+	/// The `X` storage variant currently holds the valid solution.
+	X,
+	/// The `Y` storage variant currently holds the valid solution.
+	Y,
+}
+
+impl Default for ValidSolution {
+	fn default() -> Self {
+		ValidSolution::Y
+	}
+}
+
+impl ValidSolution {
+	/// The counterpart variant, i.e. the one currently considered invalid.
+	fn other(&self) -> Self {
+		match *self {
+			ValidSolution::X => ValidSolution::Y,
+			ValidSolution::Y => ValidSolution::X,
+		}
+	}
+}
+
+/// A simple newtype that represents the partial backing of a winner. It only stores the total
+/// backing, and the sum of backings, as opposed to a [`sp_npos_elections::Support`] that also
+/// stores all of the backers' individual contribution.
+///
+/// This is mainly here to allow us to implement `Backings` for it.
+#[derive(Default, Encode, Decode, MaxEncodedLen, scale_info::TypeInfo)]
+pub struct PartialBackings {
+	/// The total backing of this particular winner.
+	pub total: ExtendedBalance,
+	/// The number of backers.
+	pub backers: u32,
+}
+
+impl sp_npos_elections::Backings for PartialBackings {
+	/// The total backing, as consumed by `sp_npos_elections` score evaluation.
+	fn total(&self) -> ExtendedBalance {
+		self.total
+	}
+}
+
+#[frame_support::pallet]
+pub(crate) mod pallet {
+	use super::*;
+	#[pallet::config]
+	#[pallet::disable_frame_system_supertrait_check]
+	pub trait Config: crate::Config {
+		/// The overarching event type.
+		///
+		/// The `TryInto` bound allows converting the runtime event back into this pallet's local
+		/// event, e.g. for inspecting deposited events in benchmarks and tests.
+		type RuntimeEvent: From<Event<Self>>
+			+ IsType<<Self as frame_system::Config>::RuntimeEvent>
+			+ TryInto<Event<Self>>
+			+ Clone;
+
+		/// The minimum amount of improvement to the solution score that defines a solution as
+		/// "better".
+		#[pallet::constant]
+		type SolutionImprovementThreshold: Get<Perbill>;
+
+		/// Maximum number of backers, per winner, among all pages of an election.
+		///
+		/// This can only be checked at the very final step of verification.
+		type MaxBackersPerWinnerFinal: Get<u32>;
+
+		/// Maximum number of backers, per winner, per page.
+		type MaxBackersPerWinner: Get<u32>;
+
+		/// Maximum number of supports (aka. winners/validators/targets) that can be represented in
+		/// a page of results.
+		type MaxWinnersPerPage: Get<u32>;
+
+		/// Something that can provide the solution data to the verifier.
+		///
+		/// In reality, this will be fulfilled by the signed phase.
+		type SolutionDataProvider: crate::verifier::SolutionDataProvider<
+			Solution = SolutionOf<Self::MinerConfig>,
+		>;
+
+		/// The weight information of this pallet.
+		type WeightInfo: super::WeightInfo;
+	}
+
+	#[pallet::event]
+	#[pallet::generate_deposit(pub(super) fn deposit_event)]
+	pub enum Event<T> {
+		/// The verification data was unavailable and it could not continue.
+		VerificationDataUnavailable,
+		/// A verification failed at the given page.
+		///
+		/// NOTE: if the index is 0, then this could mean either the feasibility of the last page
+		/// was wrong, or the final checks of `finalize_verification` failed.
+		VerificationFailed(PageIndex, FeasibilityError),
+		/// The given page of a solution has been verified, with the given number of winners being
+		/// found in it.
+		Verified(PageIndex, u32),
+		/// A solution with the given score has replaced our current best solution.
+		///
+		/// The second element is the score of the previous solution, if any, which is now ejected.
+		Queued(ElectionScore, Option<ElectionScore>),
+	}
+
+	// TODO this has to be entirely re-done to take into account that for lazy deletions. We store
+	// the queued solutions per round and account id. if a solution is invalid, we just mark it as
+	// garbage and delete it later.
+	// we keep a pointer to (round, who) which stores the current best solution.
+
+	/// A wrapper interface for the storage items related to the queued solution.
+	///
+	/// It wraps the following:
+	///
+	/// - `QueuedSolutionX`
+	/// - `QueuedSolutionY`
+	/// - `QueuedValidVariant`
+	/// - `QueuedSolutionScore`
+	/// - `QueuedSolutionBackings`
+	///
+	/// As the name suggests, `QueuedValidVariant` points to the correct variant between
+	/// `QueuedSolutionX` and `QueuedSolutionY`. In the context of this pallet, by VALID and
+	/// INVALID variant we mean either of these two storage items, based on the value of
+	/// `QueuedValidVariant`.
+	///
+	/// ### Invariants
+	///
+	/// The following conditions must be met at all times for this group of storage items to be
+	/// sane.
+	///
+	/// - `QueuedSolutionScore` must always be correct. In other words, it should correctly be the
+	///   score of `QueuedValidVariant`.
+	/// - `QueuedSolutionScore` must always be [`Config::SolutionImprovementThreshold`] better than
+	///   `MinimumScore`.
+	/// - The number of existing keys in `QueuedSolutionBackings` must always match that of the
+	///   INVALID variant.
+	///
+	/// Moreover, the following conditions must be met when this pallet is in [`Status::Nothing`],
+	/// meaning that no asynchronous verification is ongoing.
+	///
+	/// - No keys should exist in the INVALID variant.
+	/// 	- This implies that no data should exist in `QueuedSolutionBackings`.
+	///
+	/// > Note that some keys *might* exist in the queued variant, but since partial solutions
+	/// > (having less than `T::Pages` pages) are in principle correct, we cannot assert anything on
+	/// > the number of keys in the VALID variant. In fact, an empty solution with score of [0, 0,
+	/// > 0] can also be correct.
+	///
+	/// No additional conditions must be met when the pallet is in [`Status::Ongoing`].
+	pub struct QueuedSolution<T: Config>(sp_std::marker::PhantomData<T>);
+	impl<T: Config> QueuedSolution<T> {
+		/// Private helper for mutating the storage group.
+		///
+		/// In debug builds, the group's invariants are re-checked after every mutation.
+		fn mutate_checked<R>(mutate: impl FnOnce() -> R) -> R {
+			let r = mutate();
+			#[cfg(debug_assertions)]
+			assert!(Self::sanity_check().is_ok());
+			r
+		}
+
+		/// Finalize a correct solution.
+		///
+		/// Should be called at the end of a verification process, once we are sure that a certain
+		/// solution is 100% correct.
+		///
+		/// It stores its score, flips the pointer to it being the current best one, and clears all
+		/// the backings and the invalid variant. (note: in principle, we can skip clearing the
+		/// backings here)
+		pub(crate) fn finalize_correct(score: ElectionScore) {
+			sublog!(
+				info,
+				"verifier",
+				"finalizing verification a correct solution, replacing old score {:?} with {:?}",
+				QueuedSolutionScore::<T>::get(),
+				score
+			);
+
+			Self::mutate_checked(|| {
+				QueuedValidVariant::<T>::mutate(|v| *v = v.other());
+				QueuedSolutionScore::<T>::put(score);
+
+				// Clear what was previously the valid variant. Also clears the partial backings.
+				Self::clear_invalid_and_backings_unchecked();
+			});
+		}
+
+		/// Clear all relevant information of an invalid solution.
+		///
+		/// Should be called at any step, if we encounter an issue which makes the solution
+		/// infeasible.
+		pub(crate) fn clear_invalid_and_backings() {
+			Self::mutate_checked(Self::clear_invalid_and_backings_unchecked)
+		}
+
+		/// Same as [`clear_invalid_and_backings`], but without any checks for the integrity of the
+		/// storage item group.
+		pub(crate) fn clear_invalid_and_backings_unchecked() {
+			// clear is safe as we delete at most `Pages` entries, and `Pages` is bounded.
+			match Self::invalid() {
+				ValidSolution::X => clear_paged_map!(QueuedSolutionX::<T>),
+				ValidSolution::Y => clear_paged_map!(QueuedSolutionY::<T>),
+			};
+			clear_paged_map!(QueuedSolutionBackings::<T>);
+		}
+
+		/// Write a single page of a valid solution into the `invalid` variant of the storage.
+		///
+		/// This should only be called once we are sure that this particular page is 100% correct.
+		///
+		/// This is called after *a page* has been validated, but the entire solution is not yet
+		/// known to be valid. At this stage, we write to the invalid variant. Once all pages are
+		/// verified, a call to [`finalize_correct`] will seal the correct pages and flip the
+		/// invalid/valid variants.
+		pub(crate) fn set_invalid_page(page: PageIndex, supports: SupportsOfVerifier<Pallet<T>>) {
+			use frame_support::traits::TryCollect;
+			Self::mutate_checked(|| {
+				// store the compact per-winner backing sums alongside, needed for the final score
+				// computation at the end of the verification.
+				let backings: BoundedVec<_, _> = supports
+					.iter()
+					.map(|(x, s)| (x.clone(), PartialBackings { total: s.total, backers: s.voters.len() as u32 } ))
+					.try_collect()
+					.expect("`SupportsOfVerifier` is bounded by <Pallet<T> as Verifier>::MaxWinnersPerPage, which is assured to be the same as `T::MaxWinnersPerPage` in an integrity test");
+				QueuedSolutionBackings::<T>::insert(page, backings);
+
+				match Self::invalid() {
+					ValidSolution::X => QueuedSolutionX::<T>::insert(page, supports),
+					ValidSolution::Y => QueuedSolutionY::<T>::insert(page, supports),
+				}
+			})
+		}
+
+		/// Write a single page to the valid variant directly.
+		///
+		/// This is not the normal flow of writing, and the solution is not checked.
+		///
+		/// This is only useful to override the valid solution with a single (likely backup)
+		/// solution.
+		pub(crate) fn force_set_single_page_valid(
+			page: PageIndex,
+			supports: SupportsOfVerifier<Pallet<T>>,
+			score: ElectionScore,
+		) {
+			Self::mutate_checked(|| {
+				// clear everything about valid solutions.
+				match Self::valid() {
+					ValidSolution::X => clear_paged_map!(QueuedSolutionX::<T>),
+					ValidSolution::Y => clear_paged_map!(QueuedSolutionY::<T>),
+				};
+				QueuedSolutionScore::<T>::kill();
+
+				// write a single new page.
+				match Self::valid() {
+					ValidSolution::X => QueuedSolutionX::<T>::insert(page, supports),
+					ValidSolution::Y => QueuedSolutionY::<T>::insert(page, supports),
+				}
+
+				// write the score.
+				QueuedSolutionScore::<T>::put(score);
+			})
+		}
+
+		/// Clear all storage items.
+		///
+		/// Should only be called once everything is done.
+		pub(crate) fn kill() {
+			Self::mutate_checked(|| {
+				clear_paged_map!(QueuedSolutionX::<T>);
+				clear_paged_map!(QueuedSolutionY::<T>);
+				QueuedValidVariant::<T>::kill();
+				clear_paged_map!(QueuedSolutionBackings::<T>);
+				QueuedSolutionScore::<T>::kill();
+			})
+		}
+
+		// -- non-mutating methods.
+
+		/// Return the `score` and `winner_count` of verifying solution.
+		///
+		/// Assumes that all the corresponding pages of `QueuedSolutionBackings` exist, then it
+		/// computes the final score of the solution that is currently at the end of its
+		/// verification process.
+		///
+		/// This solution corresponds to whatever is stored in the INVALID variant of
+		/// `QueuedSolution`. Recall that the score of this solution is not yet verified, so it
+		/// should never become `valid`.
+		pub(crate) fn compute_invalid_score() -> Result<(ElectionScore, u32), FeasibilityError> {
+			// ensure that this is only called when all pages are verified individually.
+			// TODO: this is a very EXPENSIVE, and perhaps unreasonable check. A partial solution
+			// could very well be valid.
+			if QueuedSolutionBackings::<T>::iter_keys().count() != T::Pages::get() as usize {
+				return Err(FeasibilityError::Incomplete)
+			}
+
+			// aggregate the per-page backings of each winner across all pages, while enforcing
+			// the cross-page `MaxBackersPerWinnerFinal` bound.
+			let mut total_supports: BTreeMap<T::AccountId, PartialBackings> = Default::default();
+			for (who, PartialBackings { backers, total }) in
+				QueuedSolutionBackings::<T>::iter().flat_map(|(_, pb)| pb)
+			{
+				let entry = total_supports.entry(who).or_default();
+				entry.total = entry.total.saturating_add(total);
+				entry.backers = entry.backers.saturating_add(backers);
+
+				if entry.backers > T::MaxBackersPerWinnerFinal::get() {
+					return Err(FeasibilityError::FailedToBoundSupport)
+				}
+			}
+
+			let winner_count = total_supports.len() as u32;
+			let score = evaluate_support(total_supports.into_values());
+
+			Ok((score, winner_count))
+		}
+
+		/// The score of the current best solution, if any.
+		pub(crate) fn queued_score() -> Option<ElectionScore> {
+			QueuedSolutionScore::<T>::get()
+		}
+
+		/// Get a page of the current queued (aka valid) solution.
+		pub(crate) fn get_queued_solution_page(
+			page: PageIndex,
+		) -> Option<SupportsOfVerifier<Pallet<T>>> {
+			match Self::valid() {
+				ValidSolution::X => QueuedSolutionX::<T>::get(page),
+				ValidSolution::Y => QueuedSolutionY::<T>::get(page),
+			}
+		}
+
+		/// The variant currently pointed to as valid.
+		fn valid() -> ValidSolution {
+			QueuedValidVariant::<T>::get()
+		}
+
+		/// The variant currently NOT pointed to as valid, i.e. the scratch space of an ongoing
+		/// verification.
+		fn invalid() -> ValidSolution {
+			Self::valid().other()
+		}
+	}
+
+	#[allow(unused)]
+	#[cfg(any(test, feature = "runtime-benchmarks", feature = "try-runtime", debug_assertions))]
+	impl<T: Config> QueuedSolution<T> {
+		/// Iterate over all pages of the valid variant.
+		pub(crate) fn valid_iter(
+		) -> impl Iterator<Item = (PageIndex, SupportsOfVerifier<Pallet<T>>)> {
+			match Self::valid() {
+				ValidSolution::X => QueuedSolutionX::<T>::iter(),
+				ValidSolution::Y => QueuedSolutionY::<T>::iter(),
+			}
+		}
+
+		/// Iterate over all pages of the invalid variant.
+		pub(crate) fn invalid_iter(
+		) -> impl Iterator<Item = (PageIndex, SupportsOfVerifier<Pallet<T>>)> {
+			match Self::invalid() {
+				ValidSolution::X => QueuedSolutionX::<T>::iter(),
+				ValidSolution::Y => QueuedSolutionY::<T>::iter(),
+			}
+		}
+
+		/// Get a single page of the valid variant, if it exists.
+		pub(crate) fn get_valid_page(page: PageIndex) -> Option<SupportsOfVerifier<Pallet<T>>> {
+			match Self::valid() {
+				ValidSolution::X => QueuedSolutionX::<T>::get(page),
+				ValidSolution::Y => QueuedSolutionY::<T>::get(page),
+			}
+		}
+
+		/// Iterate over all pages of the stored partial backings.
+		pub(crate) fn backing_iter() -> impl Iterator<
+			Item = (PageIndex, BoundedVec<(T::AccountId, PartialBackings), T::MaxWinnersPerPage>),
+		> {
+			QueuedSolutionBackings::<T>::iter()
+		}
+
+		/// Ensure that all the storage items managed by this struct are in `kill` state, meaning
+		/// the expected state after an election is OVER.
+		pub(crate) fn assert_killed() {
+			use frame_support::assert_storage_noop;
+			assert_storage_noop!(Self::kill());
+		}
+
+		/// Ensure this storage item group is in correct state.
+		pub(crate) fn sanity_check() -> Result<(), sp_runtime::DispatchError> {
+			// score is correct and better than min-score.
+			ensure!(
+				Pallet::<T>::minimum_score()
+					.zip(Self::queued_score())
+					.map_or(true, |(min_score, score)| score
+						.strict_threshold_better(min_score, Perbill::zero())),
+				"queued solution has weak score (min-score)"
+			);
+
+			// if a score is queued, it must match the re-evaluated backings of the valid variant.
+			if let Some(queued_score) = Self::queued_score() {
+				let mut backing_map: BTreeMap<T::AccountId, PartialBackings> = BTreeMap::new();
+				Self::valid_iter()
+					.flat_map(|(_, supports)| supports)
+					.for_each(|(who, support)| {
+						let entry = backing_map.entry(who).or_default();
+						entry.total = entry.total.saturating_add(support.total);
+					});
+				let real_score = evaluate_support(backing_map.into_values());
+				ensure!(real_score == queued_score, "queued solution has wrong score");
+			}
+
+			// The number of existing keys in `QueuedSolutionBackings` must always match that of
+			// the INVALID variant.
+			ensure!(
+				QueuedSolutionBackings::<T>::iter().count() == Self::invalid_iter().count(),
+				"incorrect number of backings pages",
+			);
+
+			// while no verification is ongoing, the invalid variant must be empty.
+			if let Status::Nothing = StatusStorage::<T>::get() {
+				ensure!(Self::invalid_iter().count() == 0, "dangling data in invalid variant");
+			}
+
+			Ok(())
+		}
+	}
+
+	// -- private storage items, managed by `QueuedSolution`.
+
+	/// The `X` variant of the current queued solution. Might be the valid one or not.
+	///
+	/// The two variants of this storage item is to avoid the need of copying. Recall that once a
+	/// `VerifyingSolution` is being processed, it needs to write its partial supports *somewhere*.
+	/// Writing these supports on top of a *good* queued supports is wrong, since we might bail.
+	/// Writing them to a buffer and copying at the end is slightly better, but expensive. This
+	/// flag system is best of both worlds.
+	#[pallet::storage]
+	type QueuedSolutionX<T: Config> =
+		StorageMap<_, Twox64Concat, PageIndex, SupportsOfVerifier<Pallet<T>>>;
+	/// The `Y` variant of the current queued solution. Might be the valid one or not.
+	#[pallet::storage]
+	type QueuedSolutionY<T: Config> =
+		StorageMap<_, Twox64Concat, PageIndex, SupportsOfVerifier<Pallet<T>>>;
+	/// Pointer to the variant of [`QueuedSolutionX`] or [`QueuedSolutionY`] that is currently
+	/// valid.
+	#[pallet::storage]
+	type QueuedValidVariant<T: Config> = StorageValue<_, ValidSolution, ValueQuery>;
+	/// The `(amount, count)` of backings, divided per page.
+	///
+	/// This is stored because in the last block of verification we need them to compute the score,
+	/// and check `MaxBackersPerWinnerFinal`.
+	///
+	/// This can only ever live for the invalid variant of the solution. Once it is valid, we don't
+	/// need this information anymore; the score is already computed once in
+	/// [`QueuedSolutionScore`], and the backing counts are checked.
+	#[pallet::storage]
+	type QueuedSolutionBackings<T: Config> = StorageMap<
+		_,
+		Twox64Concat,
+		PageIndex,
+		BoundedVec<(T::AccountId, PartialBackings), T::MaxWinnersPerPage>,
+	>;
+	/// The score of the valid variant of [`QueuedSolution`].
+	///
+	/// This only ever lives for the `valid` variant.
+	#[pallet::storage]
+	type QueuedSolutionScore<T: Config> = StorageValue<_, ElectionScore>;
+	// -- ^^ private storage items, managed by `QueuedSolution`.
+
+	/// The minimum score that each solution must attain in order to be considered feasible.
+	#[pallet::storage]
+	#[pallet::getter(fn minimum_score)]
+	pub(crate) type MinimumScore<T: Config> = StorageValue<_, ElectionScore>;
+
+	/// Storage item for [`Status`].
+	#[pallet::storage]
+	#[pallet::getter(fn status_storage)]
+	pub(crate) type StatusStorage<T: Config> = StorageValue<_, Status, ValueQuery>;
+
+	#[pallet::pallet]
+	pub struct Pallet<T>(PhantomData<T>);
+
+	// This pallet exposes no dispatchable calls.
+	#[pallet::call]
+	impl<T: Config> Pallet<T> {}
+
+	#[pallet::hooks]
+	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
+		fn integrity_test() {
+			// ensure that we have funneled some of our type parameters EXACTLY as-is to the
+			// verifier trait interface we implement.
+			assert_eq!(T::MaxWinnersPerPage::get(), <Self as Verifier>::MaxWinnersPerPage::get());
+			assert_eq!(
+				T::MaxBackersPerWinner::get(),
+				<Self as Verifier>::MaxBackersPerWinner::get()
+			);
+			// the per-page backer bound can never exceed the final, cross-page bound.
+			assert!(T::MaxBackersPerWinner::get() <= T::MaxBackersPerWinnerFinal::get());
+		}
+
+		fn on_initialize(_n: BlockNumberFor<T>) -> Weight {
+			Self::do_on_initialize()
+		}
+
+		#[cfg(feature = "try-runtime")]
+		fn try_state(_now: BlockNumberFor<T>) -> Result<(), sp_runtime::TryRuntimeError> {
+			Self::do_try_state(_now)
+		}
+	}
+}
+
+impl<T: Config> Pallet<T> {
+	/// The `on_initialize` logic: if a verification is [`Status::Ongoing`], fetch the next page
+	/// from [`Config::SolutionDataProvider`], verify it, and advance, finalize or abort the
+	/// verification accordingly. Returns the consumed weight.
+	fn do_on_initialize() -> Weight {
+		if let Status::Ongoing(current_page) = Self::status_storage() {
+			let maybe_page_solution =
+				<T::SolutionDataProvider as SolutionDataProvider>::get_page(current_page);
+
+			if maybe_page_solution.as_ref().is_none() {
+				// the data provider has zilch, revert to a clean state, waiting for a new `start`.
+				sublog!(
+					error,
+					"verifier",
+					"T::SolutionDataProvider failed to deliver page {}. This is an unexpected error.",
+					current_page,
+				);
+
+				QueuedSolution::<T>::clear_invalid_and_backings();
+				StatusStorage::<T>::put(Status::Nothing);
+				T::SolutionDataProvider::report_result(VerificationResult::DataUnavailable);
+
+				Self::deposit_event(Event::<T>::VerificationDataUnavailable);
+				// TODO: weight
+				return Default::default();
+			}
+
+			let page_solution = maybe_page_solution.expect("Option checked to not be None; qed");
+			let maybe_supports = Self::feasibility_check_page_inner(page_solution, current_page);
+
+			sublog!(
+				debug,
+				"verifier",
+				"verified page {} of a solution, outcome = {:?}",
+				current_page,
+				maybe_supports.as_ref().map(|s| s.len())
+			);
+
+			match maybe_supports {
+				Ok(supports) => {
+					Self::deposit_event(Event::<T>::Verified(current_page, supports.len() as u32));
+					QueuedSolution::<T>::set_invalid_page(current_page, supports);
+
+					if current_page > crate::Pallet::<T>::lsp() {
+						// not last page, just tick forward.
+						StatusStorage::<T>::put(Status::Ongoing(current_page.saturating_sub(1)));
+						VerifierWeightsOf::<T>::on_initialize_valid_non_terminal()
+					} else {
+						// last page, finalize everything. Solution data provider must always have a
+						// score for us at this point. Not much point in reporting a result, we just
+						// assume default score, which will almost certainly fail and cause a proper
+						// cleanup of the pallet, which is what we want anyways.
+						let claimed_score =
+							T::SolutionDataProvider::get_score().defensive_unwrap_or_default();
+
+						// in both cases of the following match, we are now back to the nothing
+						// state.
+						StatusStorage::<T>::put(Status::Nothing);
+
+						match Self::finalize_async_verification(claimed_score) {
+							Ok(_) => {
+								T::SolutionDataProvider::report_result(VerificationResult::Queued);
+							},
+							Err(_) => {
+								T::SolutionDataProvider::report_result(
+									VerificationResult::Rejected,
+								);
+								// In case of any of the errors, kill the solution.
+								QueuedSolution::<T>::clear_invalid_and_backings();
+							},
+						}
+						VerifierWeightsOf::<T>::on_initialize_valid_terminal()
+					}
+				},
+				Err(err) => {
+					// the page solution was invalid.
+					Self::deposit_event(Event::<T>::VerificationFailed(current_page, err));
+					StatusStorage::<T>::put(Status::Nothing);
+					QueuedSolution::<T>::clear_invalid_and_backings();
+					T::SolutionDataProvider::report_result(VerificationResult::Rejected);
+					// TODO: use lower weight if non-terminal.
+					VerifierWeightsOf::<T>::on_initialize_invalid_terminal()
+				},
+			}
+		} else {
+			// TODO: weight for when nothing happens
+			Default::default()
+		}
+	}
+
+	/// Verify a single page synchronously, and if fully valid, immediately queue it as the valid
+	/// solution with the given `claimed_score`.
+	///
+	/// Unlike the paged async flow in `do_on_initialize`, this performs score-quality,
+	/// feasibility, winner-count and score checks in one go.
+	fn do_verify_synchronous(
+		partial_solution: SolutionOf<T::MinerConfig>,
+		claimed_score: ElectionScore,
+		page: PageIndex,
+	) -> Result<SupportsOfVerifier<Self>, FeasibilityError> {
+		// first, ensure this score will be good enough, even if valid..
+		let _ = Self::ensure_score_quality(claimed_score)?;
+
+		// then actually check feasibility...
+		// NOTE: `MaxBackersPerWinnerFinal` is also already checked here.
+		let supports = Self::feasibility_check_page_inner(partial_solution, page)?;
+
+		// then check that the number of winners was exactly enough..
+		let desired_targets =
+			crate::Snapshot::<T>::desired_targets().ok_or(FeasibilityError::SnapshotUnavailable)?;
+		ensure!(supports.len() as u32 == desired_targets, FeasibilityError::WrongWinnerCount);
+
+		// then check the score was truth..
+		let truth_score = supports.evaluate();
+		ensure!(truth_score == claimed_score, FeasibilityError::InvalidScore);
+
+		// and finally queue the solution.
+		QueuedSolution::<T>::force_set_single_page_valid(page, supports.clone(), truth_score);
+
+		Ok(supports)
+	}
+
+	/// Finalize an asynchronous verification. Checks the final score for correctness, and ensures
+	/// that it matches all of the criteria.
+	///
+	/// This should only be called when all pages of an async verification are done.
+	///
+	/// Returns:
+	/// - `Ok()` if everything is okay, at which point the valid variant of the queued solution will
+	/// be updated. Returns
+	/// - `Err(Feasibility)` if any of the last verification steps fail.
+	fn finalize_async_verification(claimed_score: ElectionScore) -> Result<(), FeasibilityError> {
+		let outcome = QueuedSolution::<T>::compute_invalid_score()
+			.and_then(|(final_score, winner_count)| {
+				// Defensive: the snapshot must still exist while an async verification is being
+				// finalized. Previously this was a bare `.unwrap()`, which could panic in the
+				// runtime; an error here flows into the same rejection path as other failures.
+				let desired_targets = crate::Snapshot::<T>::desired_targets()
+					.ok_or(FeasibilityError::SnapshotUnavailable)?;
+				// claimed_score checked prior in seal_unverified_solution
+				if final_score != claimed_score {
+					// Score mismatch dominates, regardless of the winner count (this preserves
+					// the previous `match` behavior where `(false, _)` mapped to `InvalidScore`).
+					Err(FeasibilityError::InvalidScore)
+				} else if winner_count != desired_targets {
+					Err(FeasibilityError::WrongWinnerCount)
+				} else {
+					// all good, finalize this solution
+					// NOTE: must be before the call to `finalize_correct`.
+					Self::deposit_event(Event::<T>::Queued(
+						final_score,
+						QueuedSolution::<T>::queued_score(), /* the previous score, now
+						                                      * ejected. */
+					));
+					QueuedSolution::<T>::finalize_correct(final_score);
+					Ok(())
+				}
+			})
+			.map_err(|err| {
+				sublog!(warn, "verifier", "Finalizing solution was invalid due to {:?}.", err);
+				// and deposit an event about it.
+				Self::deposit_event(Event::<T>::VerificationFailed(0, err.clone()));
+				err
+			});
+		sublog!(debug, "verifier", "finalize verification outcome: {:?}", outcome);
+		outcome
+	}
+
+	/// Ensure that the given score is:
+	///
+	/// - better than the queued solution, if one exists.
+	/// - greater than the minimum untrusted score.
+	pub(crate) fn ensure_score_quality(score: ElectionScore) -> Result<(), FeasibilityError> {
+		// Must beat the currently queued solution (if any) by at least the configured
+		// improvement threshold.
+		let beats_queued = match <Self as Verifier>::queued_score() {
+			Some(best_score) =>
+				score.strict_threshold_better(best_score, T::SolutionImprovementThreshold::get()),
+			None => true,
+		};
+		ensure!(beats_queued, FeasibilityError::ScoreTooLow);
+
+		// Must also strictly exceed the minimum untrusted score, when one is set.
+		let beats_minimum = match Self::minimum_score() {
+			Some(min_score) => score.strict_threshold_better(min_score, Perbill::zero()),
+			None => true,
+		};
+		ensure!(beats_minimum, FeasibilityError::ScoreTooLow);
+
+		Ok(())
+	}
+
+	/// Do the full feasibility check:
+	///
+	/// - check all edges.
+	/// - checks `MaxBackersPerWinner` to be respected IN THIS PAGE.
+	/// - checks the number of winners to be less than or equal to `DesiredTargets` IN THIS PAGE
+	///   ONLY.
+	pub(super) fn feasibility_check_page_inner(
+		partial_solution: SolutionOf<T::MinerConfig>,
+		page: PageIndex,
+	) -> Result<SupportsOfVerifier<Self>, FeasibilityError> {
+		// Fetch all required parts of the parent pallet's snapshot; each must be present.
+		let snapshot_targets =
+			crate::Snapshot::<T>::targets().ok_or(FeasibilityError::SnapshotUnavailable)?;
+		let snapshot_voters =
+			crate::Snapshot::<T>::voters(page).ok_or(FeasibilityError::SnapshotUnavailable)?;
+		let desired_targets =
+			crate::Snapshot::<T>::desired_targets().ok_or(FeasibilityError::SnapshotUnavailable)?;
+
+		// Delegate the edge-checking to the snapshot-agnostic helper...
+		let miner_supports = feasibility_check_page_inner_with_snapshot::<T::MinerConfig>(
+			partial_solution,
+			&snapshot_voters,
+			&snapshot_targets,
+			desired_targets,
+		)?;
+
+		// ...and re-bound the resulting supports into the verifier's own bounds.
+		SupportsOfVerifier::<Self>::try_from_other_bounds(miner_supports)
+			.defensive_map_err(|_| FeasibilityError::FailedToBoundSupport)
+	}
+
+	/// Try-state invariant check for this pallet.
+	///
+	/// Currently the only invariant checked is the internal consistency of the queued solution
+	/// storage; the `_now` block number is unused.
+	#[cfg(any(test, feature = "runtime-benchmarks", feature = "try-runtime"))]
+	pub(crate) fn do_try_state(_now: BlockNumberFor<T>) -> Result<(), sp_runtime::TryRuntimeError> {
+		QueuedSolution::<T>::sanity_check()
+	}
+}
+
+/// Same as `feasibility_check_page_inner`, but with a snapshot.
+///
+/// This is exported as a standalone function, relying on `MinerConfig` rather than `Config` so that
+/// it can be used in any offchain miner.
+pub fn feasibility_check_page_inner_with_snapshot<T: MinerConfig>(
+	partial_solution: SolutionOf<T>,
+	snapshot_voters: &BoundedVec<VoterOf<T>, T::VoterSnapshotPerBlock>,
+	snapshot_targets: &BoundedVec<T::AccountId, T::TargetSnapshotPerBlock>,
+	desired_targets: u32,
+) -> Result<SupportsOfMiner<T>, FeasibilityError> {
+	// ----- Start building. First, we need some closures.
+	let cache = helpers::generate_voter_cache::<T, _>(snapshot_voters);
+	let voter_at = helpers::voter_at_fn::<T>(snapshot_voters);
+	let target_at = helpers::target_at_fn::<T>(snapshot_targets);
+	let voter_index = helpers::voter_index_fn_usize::<T>(&cache);
+
+	// Then convert solution -> assignment. This will fail if any of the indices are
+	// gibberish.
+	let assignments = partial_solution
+		.into_assignment(voter_at, target_at)
+		.map_err::<FeasibilityError, _>(Into::into)?;
+
+	// Ensure that assignments are all correct. `try_for_each` short-circuits on the first
+	// error, same as the previous `map(..).collect::<Result<(), _>>()` but without the redundant
+	// intermediate collection and `ref` pattern.
+	assignments.iter().try_for_each(|assignment| {
+		// Check that assignment.who is actually a voter (defensive-only). NOTE: while
+		// using the index map from `voter_index` is better than a blind linear search,
+		// this *still* has room for optimization. Note that we had the index when we
+		// did `solution -> assignment` and we lost it. Ideal is to keep the index
+		// around.
+
+		// Defensive-only: must exist in the snapshot.
+		let snapshot_index =
+			voter_index(&assignment.who).ok_or(FeasibilityError::InvalidVoter)?;
+		// Defensive-only: index comes from the snapshot, must exist.
+		let (_voter, _stake, targets) =
+			snapshot_voters.get(snapshot_index).ok_or(FeasibilityError::InvalidVoter)?;
+		debug_assert!(*_voter == assignment.who);
+
+		// Check that all of the targets are valid based on the snapshot.
+		if assignment.distribution.iter().any(|(t, _)| !targets.contains(t)) {
+			return Err(FeasibilityError::InvalidVote)
+		}
+		Ok(())
+	})?;
+
+	// ----- Start building support. First, we need one more closure.
+	let stake_of = helpers::stake_of_fn::<T, _>(snapshot_voters, &cache);
+
+	// This might fail if the normalization fails. Very unlikely. See `integrity_test`.
+	let staked_assignments =
+		sp_npos_elections::assignment_ratio_to_staked_normalized(assignments, stake_of)
+			.map_err::<FeasibilityError, _>(Into::into)?;
+
+	let supports = sp_npos_elections::to_supports(&staked_assignments);
+
+	// Ensure some heuristics. These conditions must hold in the **entire** support, this is
+	// just a single page. But, they must hold in a single page as well.
+	ensure!((supports.len() as u32) <= desired_targets, FeasibilityError::WrongWinnerCount);
+
+	// almost-defensive-only: `MaxBackersPerWinner` is already checked. A sane value of
+	// `MaxWinnersPerPage` should be more than any possible value of `desired_targets()`, which
+	// is ALSO checked, so this conversion can almost never fail.
+	supports.try_into().map_err(|_| FeasibilityError::FailedToBoundSupport)
+}
+
+impl<T: Config> Verifier for Pallet<T> {
+	type AccountId = T::AccountId;
+	type Solution = SolutionOf<T::MinerConfig>;
+	type MaxBackersPerWinner = T::MaxBackersPerWinner;
+	type MaxWinnersPerPage = T::MaxWinnersPerPage;
+	type MaxBackersPerWinnerFinal = T::MaxBackersPerWinnerFinal;
+
+	fn set_minimum_score(score: ElectionScore) {
+		// Unconditionally overwrites any previously stored minimum score.
+		MinimumScore::<T>::put(score);
+	}
+
+	fn ensure_claimed_score_improves(claimed_score: ElectionScore) -> bool {
+		// Boolean view of `ensure_score_quality`: the score must improve on the queued score
+		// (by the configured threshold) and exceed the minimum score, if either is set.
+		Self::ensure_score_quality(claimed_score).is_ok()
+	}
+
+	fn queued_score() -> Option<ElectionScore> {
+		QueuedSolution::<T>::queued_score()
+	}
+
+	fn kill() {
+		// Wipe the queued solution storage and reset the verification status machine.
+		QueuedSolution::<T>::kill();
+		<StatusStorage<T>>::put(Status::Nothing);
+	}
+
+	fn get_queued_solution_page(page: PageIndex) -> Option<SupportsOfVerifier<Self>> {
+		QueuedSolution::<T>::get_queued_solution_page(page)
+	}
+
+	fn verify_synchronous(
+		partial_solution: Self::Solution,
+		claimed_score: ElectionScore,
+		page: PageIndex,
+	) -> Result<SupportsOfVerifier<Self>, FeasibilityError> {
+		// Capture the previous best score *before* verification (which may replace it), so the
+		// `Queued` event can report the now-ejected score.
+		let maybe_current_score = Self::queued_score();
+		match Self::do_verify_synchronous(partial_solution, claimed_score, page) {
+			Ok(supports) => {
+				sublog!(
+					info,
+					"verifier",
+					"queued a sync solution with score {:?} for page {}",
+					claimed_score,
+					page
+				);
+				Self::deposit_event(Event::<T>::Verified(page, supports.len() as u32));
+				Self::deposit_event(Event::<T>::Queued(claimed_score, maybe_current_score));
+				Ok(supports)
+			},
+			Err(fe) => {
+				sublog!(
+					warn,
+					"verifier",
+					"sync verification of page {} failed due to {:?}.",
+					page,
+					fe
+				);
+				// Failure is surfaced both as an event and as the returned error.
+				Self::deposit_event(Event::<T>::VerificationFailed(page, fe.clone()));
+				Err(fe)
+			},
+		}
+	}
+
+	fn force_set_single_page_valid(
+		partial_supports: SupportsOfVerifier<Self>,
+		page: PageIndex,
+		score: ElectionScore,
+	) {
+		// No checks performed here: this is the trusted/emergency path. The event carries the
+		// previously queued score, which is being ejected.
+		Self::deposit_event(Event::<T>::Queued(score, QueuedSolution::<T>::queued_score()));
+		QueuedSolution::<T>::force_set_single_page_valid(page, partial_supports, score);
+	}
+}
+
+impl<T: Config> AsynchronousVerifier for Pallet<T> {
+	type SolutionDataProvider = T::SolutionDataProvider;
+
+	fn status() -> Status {
+		Pallet::<T>::status_storage()
+	}
+
+	fn start() -> Result<(), &'static str> {
+		sublog!(info, "verifier", "start signal received.");
+		// Only start if no verification is currently ongoing.
+		if let Status::Nothing = Self::status() {
+			// NOTE(review): if the data provider has no score, a default score is used here; with
+			// no queued solution and no minimum score set, a default score passes
+			// `ensure_score_quality` — confirm this is the intended behavior.
+			let claimed_score = Self::SolutionDataProvider::get_score().unwrap_or_default();
+			if Self::ensure_score_quality(claimed_score).is_err() {
+				// don't do anything, report back that this solution was garbage.
+				Self::deposit_event(Event::<T>::VerificationFailed(
+					crate::Pallet::<T>::msp(),
+					FeasibilityError::ScoreTooLow,
+				));
+				T::SolutionDataProvider::report_result(VerificationResult::Rejected);
+				// Despite being an instant-reject, this was a successful `start` operation.
+				Ok(())
+			} else {
+				// This solution is good enough to win, we start verifying it in the next block.
+				// Verification proceeds from the most significant page (msp) downwards.
+				StatusStorage::<T>::put(Status::Ongoing(crate::Pallet::<T>::msp()));
+				Ok(())
+			}
+		} else {
+			sublog!(warn, "verifier", "start signal received while busy. This will be ignored.")
;
+			Err("verification ongoing")
+		}
+	}
+
+	fn stop() {
+		sublog!(warn, "verifier", "stop signal received. clearing everything.");
+
+		// We clear any in-progress, not-yet-verified solution in any case, although partial data
+		// should only exist if we were doing something.
+		#[cfg(debug_assertions)]
+		assert!(
+			!matches!(StatusStorage::<T>::get(), Status::Ongoing(_)) ||
+				(matches!(StatusStorage::<T>::get(), Status::Ongoing(_)) &&
+					QueuedSolution::<T>::invalid_iter().count() > 0)
+		);
+		QueuedSolution::<T>::clear_invalid_and_backings_unchecked();
+
+		// we also mutate the status back to doing nothing.
+		// If a verification was in flight, the data provider must be told it was rejected.
+		StatusStorage::<T>::mutate(|old| {
+			if matches!(old, Status::Ongoing(_)) {
+				T::SolutionDataProvider::report_result(VerificationResult::Rejected)
+			}
+			*old = Status::Nothing;
+		});
+	}
+}
diff --git a/substrate/frame/election-provider-multi-block/src/verifier/mod.rs b/substrate/frame/election-provider-multi-block/src/verifier/mod.rs
new file mode 100644
index 00000000000..98391daa546
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/verifier/mod.rs
@@ -0,0 +1,271 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! # The Verifier Pallet
+//!
+//! ### *Feasibility* Check
+//!
+//! Before explaining the pallet itself, it should be explained what a *verification* even means.
+//! Verification of a solution page ([`crate::unsigned::miner::MinerConfig::Solution`]) includes the
+//! process of checking all of its edges against a snapshot to be correct. For instance, all voters
+//! that are presented in a solution page must have actually voted for the winner that they are
+//! backing, based on the snapshot kept in the parent pallet.
+//!
+//! After checking all of the edges, a handful of other checks are performed:
+//!
+//! 1. Check that the total number of winners is sufficient (`DesiredTargets`).
+//! 2. Check that the claimed score ([`sp_npos_elections::ElectionScore`]) is correct,
+//!   3. and more than the minimum score that can be specified via [`Verifier::set_minimum_score`].
+//! 4. Check that all of the bounds of the solution are respected, namely
+//!    [`Verifier::MaxBackersPerWinner`], [`Verifier::MaxWinnersPerPage`] and
+//!    [`Verifier::MaxBackersPerWinnerFinal`].
+//!
+//! Note that the common factor of all of these checks is that they can ONLY be checked after all
+//! pages are already verified. So, in the case of a multi-page verification, these checks are
+//! performed at the last page.
+//!
+//! The errors that can arise while performing the feasibility check are encapsulated in
+//! [`verifier::FeasibilityError`].
+//!
+//! ## Modes of Verification
+//!
+//! The verifier pallet provides two modes of functionality:
+//!
+//! 1. Single-page, synchronous verification. This is useful in the context of single-page,
+//!    emergency, or unsigned solutions that need to be verified on the fly. This is similar to how
+//!    the old school `multi-phase` pallet works.
+//! 2. Multi-page, asynchronous verification. This is useful in the context of multi-page, signed
+//!    solutions.
+//!
+//! Both of these, plus some helper functions, are exposed via the [`Verifier`] trait.
+//!
+//! ## Queued Solution
+//!
+//! Once a solution has been verified, it is called a *queued solution*. It is sitting in a queue,
+//! waiting for either of:
+//!
+//! 1. being challenged and potentially replaced by better solution, if any.
+//! 2. being exported as the final outcome of the election.
+
+#[cfg(feature = "runtime-benchmarks")]
+pub mod benchmarking;
+mod impls;
+#[cfg(test)]
+mod tests;
+
+// internal imports
+use frame_election_provider_support::PageIndex;
+use impls::SupportsOfVerifier;
+pub use impls::{feasibility_check_page_inner_with_snapshot, pallet::*, Status};
+use sp_core::Get;
+use sp_npos_elections::ElectionScore;
+use sp_std::{fmt::Debug, prelude::*};
+
+pub use crate::weights::measured::pallet_election_provider_multi_block_verifier::*;
+
+/// Errors that can happen in the feasibility check.
+///
+/// NOTE: variant order is significant for the codec-derived encoding; do not reorder.
+#[derive(Debug, Eq, PartialEq, codec::Encode, codec::Decode, scale_info::TypeInfo, Clone)]
+pub enum FeasibilityError {
+	/// Wrong number of winners presented.
+	WrongWinnerCount,
+	/// The snapshot is not available.
+	///
+	/// Kinda defensive: The pallet should technically never attempt to do a feasibility check
+	/// when no snapshot is present.
+	SnapshotUnavailable,
+	/// A vote is invalid.
+	InvalidVote,
+	/// A voter is invalid.
+	InvalidVoter,
+	/// A winner is invalid.
+	InvalidWinner,
+	/// The given score was invalid.
+	InvalidScore,
+	/// The provided round is incorrect.
+	InvalidRound,
+	/// Solution does not have a good enough score.
+	ScoreTooLow,
+	/// The support type failed to be bounded.
+	///
+	/// Relates to [`Config::MaxWinnersPerPage`], [`Config::MaxBackersPerWinner`] or
+	/// `MaxBackersPerWinnerFinal`
+	FailedToBoundSupport,
+	/// Internal error from the election crate.
+	NposElection(sp_npos_elections::Error),
+	/// The solution is incomplete, it has too few pages.
+	///
+	/// This is (somewhat) synonym to `WrongPageCount` in other places.
+	Incomplete,
+}
+
+impl From<sp_npos_elections::Error> for FeasibilityError {
+	/// Wrap an internal election-crate error into the feasibility error type.
+	fn from(e: sp_npos_elections::Error) -> Self {
+		Self::NposElection(e)
+	}
+}
+
+/// The interface of something that can verify solutions for other sub-pallets in the multi-block
+/// election pallet-network.
+pub trait Verifier {
+	/// The solution type.
+	type Solution;
+	/// The account id type.
+	type AccountId;
+
+	/// Maximum number of winners that can be represented in each page.
+	///
+	/// A reasonable value for this should be the maximum number of winners that the election user
+	/// (e.g. the staking pallet) could ever desire.
+	type MaxWinnersPerPage: Get<u32>;
+	/// Maximum number of backers, per winner, among all pages of an election.
+	///
+	/// This can only be checked at the very final step of verification.
+	type MaxBackersPerWinnerFinal: Get<u32>;
+	/// Maximum number of backers that each winner could have, per page.
+	type MaxBackersPerWinner: Get<u32>;
+
+	/// Set the minimum score that is acceptable for any solution.
+	///
+	/// Henceforth, all solutions must have at least this degree of quality, single-page or
+	/// multi-page.
+	fn set_minimum_score(score: ElectionScore);
+
+	/// The score of the current best solution. `None` if there is none.
+	fn queued_score() -> Option<ElectionScore>;
+
+	/// Check if the claimed score is sufficient to challenge the current queued solution, if any.
+	fn ensure_claimed_score_improves(claimed_score: ElectionScore) -> bool;
+
+	/// Clear all storage items, there's nothing else to do until further notice.
+	fn kill();
+
+	/// Get a single page of the best verified solution, if any.
+	///
+	/// It is the responsibility of the call site to call this function with all appropriate
+	/// `page` arguments.
+	fn get_queued_solution_page(page: PageIndex) -> Option<SupportsOfVerifier<Self>>;
+
+	/// Perform the feasibility check on the given single-page solution.
+	///
+	/// This will perform:
+	///
+	/// 1. feasibility-check
+	/// 2. claimed score is correct and an improvement.
+	/// 3. bounds are respected
+	///
+	/// Corresponding snapshot (represented by `page`) is assumed to be available.
+	///
+	/// If all checks pass, the solution is also queued.
+	fn verify_synchronous(
+		partial_solution: Self::Solution,
+		claimed_score: ElectionScore,
+		page: PageIndex,
+	) -> Result<SupportsOfVerifier<Self>, FeasibilityError>;
+
+	/// Force set a single page solution as the valid one.
+	///
+	/// Will erase any previous solution. Should only be used in case of emergency fallbacks,
+	/// trusted governance solutions and so on. No feasibility or score checks are performed.
+	fn force_set_single_page_valid(
+		partial_supports: SupportsOfVerifier<Self>,
+		page: PageIndex,
+		score: ElectionScore,
+	);
+}
+
+/// Simple enum to encapsulate the result of the verification of a candidate solution.
+///
+/// Reported back to the [`SolutionDataProvider`] via [`SolutionDataProvider::report_result`].
+#[derive(Clone, Copy, Debug)]
+#[cfg_attr(test, derive(PartialEq, Eq))]
+pub enum VerificationResult {
+	/// Solution is valid and is queued.
+	Queued,
+	/// Solution is rejected, for whichever of the multiple reasons that it could be.
+	Rejected,
+	/// The data needed (solution pages or the score) was unavailable. This should rarely happen.
+	DataUnavailable,
+}
+
+/// Something that can provide candidate solutions to the verifier.
+///
+/// In reality, this can be implemented by the [`crate::signed::Pallet`], where signed solutions are
+/// queued and sorted based on claimed score, and they are put forth one by one, from best to worse.
+pub trait SolutionDataProvider {
+	/// The opaque solution type.
+	type Solution;
+
+	/// Return the `page`th page of the current best solution that the data provider has in store.
+	///
+	/// If no candidate solutions are available, then `None` is returned.
+	fn get_page(page: PageIndex) -> Option<Self::Solution>;
+
+	/// Get the claimed score of the current best solution.
+	fn get_score() -> Option<ElectionScore>;
+
+	/// Hook to report back the results of the verification of the current candidate solution that
+	/// is being exposed via [`Self::get_page`] and [`Self::get_score`].
+	///
+	/// Every time that this is called, the verifier [`AsynchronousVerifier`] goes back to the
+	/// [`Status::Nothing`] state, and it is the responsibility of [`Self`] to call `start` again,
+	/// if desired.
+	fn report_result(result: VerificationResult);
+}
+
+/// Something that can do the verification asynchronously.
+pub trait AsynchronousVerifier: Verifier {
+	/// The data provider that can provide the candidate solution, and to whom we report back the
+	/// results.
+	type SolutionDataProvider: SolutionDataProvider;
+
+	/// Get the current stage of the verification process.
+	fn status() -> Status;
+
+	/// Start a verification process.
+	///
+	/// Returns `Ok(())` if verification started successfully, and `Err(..)` if a verification is
+	/// already ongoing and therefore a new one cannot be started.
+	///
+	/// From the coming block onwards, the verifier will start and fetch the relevant information
+	/// and solution pages from [`SolutionDataProvider`]. It is expected that the
+	/// [`SolutionDataProvider`] is ready before calling [`Self::start`].
+	///
+	/// Pages of the solution are fetched sequentially and in order from [`SolutionDataProvider`],
+	/// from `msp` to `lsp`.
+	///
+	/// This ends in either of the two:
+	///
+	/// 1. All pages, including the final checks (like score and other facts that can only be
+	///    derived from a full solution) are valid and the solution is verified. The solution is
+	///    queued and is ready for further export.
+	/// 2. The solution fails verification at one of the steps. Nothing is stored inside the
+	///    verifier pallet and all intermediary data is removed.
+	///
+	/// In both cases, the [`SolutionDataProvider`] is informed via
+	/// [`SolutionDataProvider::report_result`]. It is sensible for the data provider to call
+	/// `start` again if the verification has failed, and nothing otherwise. Indeed, the
+	/// [`SolutionDataProvider`] must adjust its internal state such that it returns a new candidate
+	/// solution after each failure.
+	fn start() -> Result<(), &'static str>;
+
+	/// Stop the verification.
+	///
+	/// This is a force-stop operation, and should only be used in extreme cases where the
+	/// [`SolutionDataProvider`] wants to suddenly bail-out.
+	///
+	/// An implementation should make sure that no loose ends remain state-wise, and everything is
+	/// cleaned.
+	fn stop();
+}
diff --git a/substrate/frame/election-provider-multi-block/src/verifier/tests.rs b/substrate/frame/election-provider-multi-block/src/verifier/tests.rs
new file mode 100644
index 00000000000..6fd06923284
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/verifier/tests.rs
@@ -0,0 +1,1266 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::{
+	mock::*,
+	types::*,
+	verifier::{impls::Status, *},
+	*,
+};
+
+use frame_election_provider_support::Support;
+use frame_support::{assert_noop, assert_ok};
+use sp_runtime::traits::Bounded;
+
+mod feasibility_check {
+	use super::*;
+
+	#[test]
+	fn missing_snapshot() {
+		ExtBuilder::verifier().build_unchecked().execute_with(|| {
+			// create snapshot just so that we can create a solution..
+			roll_to_snapshot_created();
+			let paged = mine_full_solution().unwrap();
+
+			// ..remove the only page of the target snapshot.
+			crate::Snapshot::<Runtime>::remove_target_page();
+
+			assert_noop!(
+				VerifierPallet::feasibility_check_page_inner(paged.solution_pages[0].clone(), 0),
+				FeasibilityError::SnapshotUnavailable
+			);
+		});
+
+		ExtBuilder::verifier().pages(2).build_unchecked().execute_with(|| {
+			roll_to_snapshot_created();
+			let paged = mine_full_solution().unwrap();
+
+			// ..remove just one of the pages of voter snapshot that is relevant.
+			crate::Snapshot::<Runtime>::remove_voter_page(0);
+
+			assert_noop!(
+				VerifierPallet::feasibility_check_page_inner(paged.solution_pages[0].clone(), 0),
+				FeasibilityError::SnapshotUnavailable
+			);
+		});
+
+		ExtBuilder::verifier().pages(2).build_unchecked().execute_with(|| {
+			roll_to_snapshot_created();
+			let paged = mine_full_solution().unwrap();
+
+			// ..removing this page is not important, because we check page 0.
+			crate::Snapshot::<Runtime>::remove_voter_page(1);
+
+			assert_ok!(VerifierPallet::feasibility_check_page_inner(
+				paged.solution_pages[0].clone(),
+				0
+			));
+		});
+
+		ExtBuilder::verifier().pages(2).build_unchecked().execute_with(|| {
+			roll_to_snapshot_created();
+			let paged = mine_full_solution().unwrap();
+
+			// `DesiredTargets` missing is also an error
+			crate::Snapshot::<Runtime>::kill_desired_targets();
+
+			assert_noop!(
+				VerifierPallet::feasibility_check_page_inner(paged.solution_pages[0].clone(), 0),
+				FeasibilityError::SnapshotUnavailable
+			);
+		});
+
+		ExtBuilder::verifier().pages(2).build_unchecked().execute_with(|| {
+			roll_to_snapshot_created();
+			let paged = mine_full_solution().unwrap();
+
+			// Missing target page is an error for any checked page.
+			// NOTE(review): original comment said "`DesiredTargets` is not checked here", which
+			// does not match the code (the target page is removed) — confirm intent.
+			crate::Snapshot::<Runtime>::remove_target_page();
+
+			assert_noop!(
+				VerifierPallet::feasibility_check_page_inner(paged.solution_pages[1].clone(), 0),
+				FeasibilityError::SnapshotUnavailable
+			);
+		});
+	}
+
+	#[test]
+	fn winner_indices_single_page_must_be_in_bounds() {
+		ExtBuilder::verifier().pages(1).desired_targets(2).build_and_execute(|| {
+			roll_to_snapshot_created();
+			let mut paged = mine_full_solution().unwrap();
+			assert_eq!(crate::Snapshot::<Runtime>::targets().unwrap().len(), 4);
+			// ----------------------------------------------------^^ valid range is [0..3].
+
+			// Swap all votes from 3 to 4. There are only 4 targets, so index 4 is invalid.
+			paged.solution_pages[0]
+				.votes1
+				.iter_mut()
+				.filter(|(_, t)| *t == TargetIndex::from(3u16))
+				.for_each(|(_, t)| *t += 1);
+
+			assert_noop!(
+				VerifierPallet::feasibility_check_page_inner(paged.solution_pages[0].clone(), 0),
+				FeasibilityError::NposElection(sp_npos_elections::Error::SolutionInvalidIndex)
+			);
+		})
+	}
+
+	#[test]
+	fn voter_indices_per_page_must_be_in_bounds() {
+		ExtBuilder::verifier()
+			.pages(1)
+			.voter_per_page(Bounded::max_value())
+			.desired_targets(2)
+			.build_and_execute(|| {
+				roll_to_snapshot_created();
+				let mut paged = mine_full_solution().unwrap();
+
+				assert_eq!(crate::Snapshot::<Runtime>::voters(0).unwrap().len(), 12);
+				// ------------------------------------------------^^ valid range is [0..11] in page
+				// 0.
+
+				// Check that there is an index 11 in votes1, and flip to 12. There are only 12
+				// voters, so index 12 is invalid.
+				assert!(
+					paged.solution_pages[0]
+						.votes1
+						.iter_mut()
+						.filter(|(v, _)| *v == VoterIndex::from(11u32))
+						.map(|(v, _)| *v = 12)
+						.count() > 0
+				);
+				assert_noop!(
+					VerifierPallet::feasibility_check_page_inner(
+						paged.solution_pages[0].clone(),
+						0
+					),
+					FeasibilityError::NposElection(sp_npos_elections::Error::SolutionInvalidIndex),
+				);
+			})
+	}
+
+	#[test]
+	fn voter_must_have_same_targets_as_snapshot() {
+		ExtBuilder::verifier()
+			.pages(1)
+			.voter_per_page(Bounded::max_value())
+			.desired_targets(2)
+			.build_and_execute(|| {
+				roll_to_snapshot_created();
+				let mut paged = mine_full_solution().unwrap();
+
+				// First, check that voter at index 11 (40) actually voted for 3 (40) -- this is
+				// self vote. Then, change the vote to 2 (30).
+				assert_eq!(
+					paged.solution_pages[0]
+						.votes1
+						.iter_mut()
+						.filter(|(v, t)| *v == 11 && *t == 3)
+						.map(|(_, t)| *t = 2)
+						.count(),
+					1,
+				);
+				assert_noop!(
+					VerifierPallet::feasibility_check_page_inner(
+						paged.solution_pages[0].clone(),
+						0
+					),
+					FeasibilityError::InvalidVote,
+				);
+			})
+	}
+
+	#[test]
+	fn heuristic_max_backers_per_winner_per_page() {
+		ExtBuilder::verifier().max_backers_per_winner(2).build_and_execute(|| {
+			roll_to_snapshot_created();
+
+			// these votes are all valid, but one winner has 3 backers in a single page, above
+			// the limit of 2.
+			let solution = solution_from_supports(
+				vec![(40, Support { total: 30, voters: vec![(2, 10), (3, 10), (4, 10)] })],
+				// all these voters are in page 2 of the snapshot, the msp!
+				2,
+			);
+
+			assert_noop!(
+				VerifierPallet::feasibility_check_page_inner(solution, 2),
+				FeasibilityError::FailedToBoundSupport,
+			);
+		})
+	}
+
+	#[test]
+	fn heuristic_desired_target_check_per_page() {
+		ExtBuilder::verifier().desired_targets(2).build_and_execute(|| {
+			roll_to(25);
+			assert_full_snapshot();
+
+			// all of these votes are valid, but this solution is already presenting 3 winners,
+			// while we want just 2.
+			let solution = solution_from_supports(
+				vec![
+					(10, Support { total: 30, voters: vec![(4, 2)] }),
+					(20, Support { total: 30, voters: vec![(4, 2)] }),
+					(40, Support { total: 30, voters: vec![(4, 6)] }),
+				],
+				// all these voters are in page 2 of the snapshot, the msp!
+				2,
+			);
+
+			assert_noop!(
+				VerifierPallet::feasibility_check_page_inner(solution, 2),
+				FeasibilityError::WrongWinnerCount,
+			);
+		})
+	}
+}
+
+mod async_verification {
+	use sp_core::bounded_vec;
+
+	use super::*;
+	// disambiguate event
+	use crate::verifier::Event;
+
+	#[test]
+	fn basic_single_verification_works() {
+		ExtBuilder::verifier().pages(1).build_and_execute(|| {
+			// load a solution after the snapshot has been created.
+			roll_to_snapshot_created();
+
+			let solution = mine_full_solution().unwrap();
+			load_mock_signed_and_start(solution.clone());
+
+			// now let it verify
+			roll_next();
+
+			// It done after just one block.
+			assert_eq!(VerifierPallet::status(), Status::Nothing);
+			assert_eq!(
+				verifier_events(),
+				vec![
+					Event::<Runtime>::Verified(0, 2),
+					Event::<Runtime>::Queued(solution.score, None)
+				]
+			);
+			assert_eq!(MockSignedResults::get(), vec![VerificationResult::Queued]);
+		});
+	}
+
+	#[test]
+	fn basic_multi_verification_works() {
+		ExtBuilder::verifier().pages(3).build_and_execute(|| {
+			// load a solution after the snapshot has been created.
+			roll_to_snapshot_created();
+
+			let solution = mine_full_solution().unwrap();
+			// ------------- ^^^^^^^^^^^^
+
+			load_mock_signed_and_start(solution.clone());
+			assert_eq!(VerifierPallet::status(), Status::Ongoing(2));
+			assert_eq!(QueuedSolution::<Runtime>::valid_iter().count(), 0);
+
+			// now let it verify
+			roll_next();
+			assert_eq!(VerifierPallet::status(), Status::Ongoing(1));
+			assert_eq!(verifier_events(), vec![Event::<Runtime>::Verified(2, 2)]);
+			// 1 page verified, stored as invalid.
+			assert_eq!(QueuedSolution::<Runtime>::invalid_iter().count(), 1);
+
+			roll_next();
+			assert_eq!(VerifierPallet::status(), Status::Ongoing(0));
+			assert_eq!(
+				verifier_events(),
+				vec![Event::<Runtime>::Verified(2, 2), Event::<Runtime>::Verified(1, 2),]
+			);
+			// 2 pages verified, stored as invalid.
+			assert_eq!(QueuedSolution::<Runtime>::invalid_iter().count(), 2);
+
+			// nothing is queued yet.
+			assert_eq!(MockSignedResults::get(), vec![]);
+			assert_eq!(QueuedSolution::<Runtime>::valid_iter().count(), 0);
+			assert!(QueuedSolution::<Runtime>::queued_score().is_none());
+
+			// last block.
+			roll_next();
+			assert_eq!(VerifierPallet::status(), Status::Nothing);
+			assert_eq!(
+				verifier_events(),
+				vec![
+					Event::<Runtime>::Verified(2, 2),
+					Event::<Runtime>::Verified(1, 2),
+					Event::<Runtime>::Verified(0, 2),
+					Event::<Runtime>::Queued(solution.score, None),
+				]
+			);
+			assert_eq!(MockSignedResults::get(), vec![VerificationResult::Queued]);
+
+			// a solution has been queued
+			assert_eq!(QueuedSolution::<Runtime>::valid_iter().count(), 3);
+			assert!(QueuedSolution::<Runtime>::queued_score().is_some());
+		});
+	}
+
+	#[test]
+	fn basic_multi_verification_partial() {
+		ExtBuilder::verifier().pages(3).build_and_execute(|| {
+			// load a solution after the snapshot has been created.
+			roll_to_snapshot_created();
+
+			let solution = mine_solution(2).unwrap();
+			// -------------------------^^^
+
+			load_mock_signed_and_start(solution.clone());
+
+			assert_eq!(VerifierPallet::status(), Status::Ongoing(2));
+			assert_eq!(QueuedSolution::<Runtime>::valid_iter().count(), 0);
+
+			// now let it verify
+			roll_next();
+			assert_eq!(VerifierPallet::status(), Status::Ongoing(1));
+			assert_eq!(verifier_events(), vec![Event::<Runtime>::Verified(2, 2)]);
+			// 1 page verified, stored as invalid.
+			assert_eq!(QueuedSolution::<Runtime>::invalid_iter().count(), 1);
+
+			roll_next();
+			assert_eq!(VerifierPallet::status(), Status::Ongoing(0));
+			assert_eq!(
+				verifier_events(),
+				vec![Event::<Runtime>::Verified(2, 2), Event::<Runtime>::Verified(1, 2),]
+			);
+			// 2 pages verified, stored as invalid.
+			assert_eq!(QueuedSolution::<Runtime>::invalid_iter().count(), 2);
+
+			// nothing is queued yet.
+			assert_eq!(MockSignedResults::get(), vec![]);
+			assert_eq!(QueuedSolution::<Runtime>::valid_iter().count(), 0);
+			assert!(QueuedSolution::<Runtime>::queued_score().is_none());
+
+			roll_next();
+			assert_eq!(VerifierPallet::status(), Status::Nothing);
+
+			assert_eq!(
+				verifier_events(),
+				vec![
+					Event::<Runtime>::Verified(2, 2),
+					Event::<Runtime>::Verified(1, 2),
+					// this is a partial solution, no one in this page (lsp).
+					Event::<Runtime>::Verified(0, 0),
+					Event::<Runtime>::Queued(solution.score, None),
+				]
+			);
+
+			// a solution has been queued
+			assert_eq!(MockSignedResults::get(), vec![VerificationResult::Queued]);
+			assert_eq!(QueuedSolution::<Runtime>::valid_iter().count(), 3);
+			assert!(QueuedSolution::<Runtime>::queued_score().is_some());
+
+			// page 0 is empty..
+			assert_eq!(QueuedSolution::<Runtime>::get_valid_page(0).unwrap().len(), 0);
+			// .. the other two are not.
+			assert_eq!(QueuedSolution::<Runtime>::get_valid_page(1).unwrap().len(), 2);
+			assert_eq!(QueuedSolution::<Runtime>::get_valid_page(2).unwrap().len(), 2);
+		});
+	}
+
+	#[test]
+	fn solution_data_provider_failing_initial() {
+		ExtBuilder::verifier().build_and_execute(|| {
+			// not super important, but anyways..
+			roll_to_snapshot_created();
+
+			// The solution data provider is empty.
+			assert_eq!(SignedPhaseSwitch::get(), SignedSwitch::Mock);
+			assert_eq!(MockSignedNextSolution::get(), None);
+
+			// nothing happens..
+			assert_eq!(VerifierPallet::status(), Status::Nothing);
+			assert_ok!(<VerifierPallet as AsynchronousVerifier>::start());
+			assert_eq!(VerifierPallet::status(), Status::Ongoing(2));
+
+			roll_next();
+
+			// we instantly stop.
+			assert_eq!(verifier_events(), vec![Event::<Runtime>::VerificationDataUnavailable]);
+			assert_eq!(VerifierPallet::status(), Status::Nothing);
+			assert!(QueuedSolution::<Runtime>::invalid_iter().count().is_zero());
+			assert!(QueuedSolution::<Runtime>::backing_iter().count().is_zero());
+
+			// and we report invalid back.
+			assert_eq!(MockSignedResults::get(), vec![VerificationResult::DataUnavailable]);
+		});
+	}
+
+	#[test]
+	fn solution_data_provider_failing_midway() {
+		ExtBuilder::verifier().build_and_execute(|| {
+			roll_to_snapshot_created();
+
+			let solution = mine_full_solution().unwrap();
+			load_mock_signed_and_start(solution.clone());
+
+			assert_eq!(VerifierPallet::status(), Status::Ongoing(2));
+
+			// now let it verify. first one goes fine.
+			roll_next();
+			assert_eq!(VerifierPallet::status(), Status::Ongoing(1));
+			assert_eq!(verifier_events(), vec![Event::<Runtime>::Verified(2, 2)]);
+			assert_eq!(MockSignedResults::get(), vec![]);
+
+			// 1 page verified, stored as invalid.
+			assert_eq!(QueuedSolution::<Runtime>::invalid_iter().count(), 1);
+			assert_eq!(QueuedSolution::<Runtime>::backing_iter().count(), 1);
+			assert_eq!(QueuedSolution::<Runtime>::valid_iter().count(), 0);
+
+			// suddenly clear this guy.
+			MockSignedNextSolution::set(None);
+			MockSignedNextScore::set(None);
+
+			roll_next();
+
+			// we instantly stop.
+			assert_eq!(
+				verifier_events(),
+				vec![
+					Event::<Runtime>::Verified(2, 2),
+					Event::<Runtime>::VerificationDataUnavailable
+				]
+			);
+			assert_eq!(VerifierPallet::status(), Status::Nothing);
+			assert_eq!(QueuedSolution::<Runtime>::invalid_iter().count(), 0);
+			assert_eq!(QueuedSolution::<Runtime>::valid_iter().count(), 0);
+			assert_eq!(QueuedSolution::<Runtime>::backing_iter().count(), 0);
+
+			// and we report invalid back.
+			assert_eq!(MockSignedResults::get(), vec![VerificationResult::DataUnavailable]);
+		})
+	}
+
+	#[test]
+	fn rejects_new_verification_via_start_if_ongoing() {
+		ExtBuilder::verifier().build_and_execute(|| {
+			roll_to_snapshot_created();
+
+			let solution = mine_full_solution().unwrap();
+			load_mock_signed_and_start(solution.clone());
+
+			assert_eq!(VerifierPallet::status(), Status::Ongoing(2));
+
+			// nada
+			assert_noop!(<VerifierPallet as AsynchronousVerifier>::start(), "verification ongoing");
+
+			// now let it verify. first one goes fine.
+			roll_next();
+			assert_eq!(VerifierPallet::status(), Status::Ongoing(1));
+			assert_eq!(verifier_events(), vec![Event::<Runtime>::Verified(2, 2)]);
+			assert_eq!(MockSignedResults::get(), vec![]);
+
+			// retry, still nada.
+			assert_noop!(<VerifierPallet as AsynchronousVerifier>::start(), "verification ongoing");
+		})
+	}
+
+	#[test]
+	fn stop_clears_everything() {
+		ExtBuilder::verifier().build_and_execute(|| {
+			roll_to_snapshot_created();
+
+			let solution = mine_full_solution().unwrap();
+			load_mock_signed_and_start(solution.clone());
+
+			assert_eq!(VerifierPallet::status(), Status::Ongoing(2));
+
+			roll_next();
+			assert_eq!(VerifierPallet::status(), Status::Ongoing(1));
+			assert_eq!(verifier_events(), vec![Event::<Runtime>::Verified(2, 2)]);
+
+			roll_next();
+			assert_eq!(VerifierPallet::status(), Status::Ongoing(0));
+			assert_eq!(
+				verifier_events(),
+				vec![Event::<Runtime>::Verified(2, 2), Event::<Runtime>::Verified(1, 2)]
+			);
+
+			// now suddenly, we stop
+			<VerifierPallet as AsynchronousVerifier>::stop();
+			assert_eq!(VerifierPallet::status(), Status::Nothing);
+
+			// everything is cleared.
+			assert_eq!(QueuedSolution::<Runtime>::invalid_iter().count(), 0);
+			assert_eq!(QueuedSolution::<Runtime>::valid_iter().count(), 0);
+			assert_eq!(QueuedSolution::<Runtime>::backing_iter().count(), 0);
+
+			// and we report back that the solution was rejected.
+			assert_eq!(MockSignedResults::get(), vec![VerificationResult::Rejected]);
+		})
+	}
+
+	#[test]
+	fn weak_valid_solution_is_insta_rejected() {
+		ExtBuilder::verifier().build_and_execute(|| {
+			roll_to_snapshot_created();
+
+			let paged = mine_full_solution().unwrap();
+			load_mock_signed_and_start(paged.clone());
+			let _ = roll_to_full_verification();
+
+			assert_eq!(
+				verifier_events(),
+				vec![
+					Event::Verified(2, 2),
+					Event::Verified(1, 2),
+					Event::Verified(0, 2),
+					Event::Queued(paged.score, None)
+				]
+			);
+			assert_eq!(MockSignedResults::get(), vec![VerificationResult::Queued]);
+
+			// a valid solution, but its score is too weak. It also does not have the full pages,
+			// which is also fine. See `basic_multi_verification_partial`.
+			let weak_page_partial =
+				solution_from_supports(vec![(10, Support { total: 10, voters: vec![(1, 10)] })], 2);
+			let weak_paged = PagedRawSolution::<Runtime> {
+				solution_pages: bounded_vec![weak_page_partial],
+				score: ElectionScore { minimal_stake: 10, sum_stake: 10, sum_stake_squared: 100 },
+				..Default::default()
+			};
+
+			load_mock_signed_and_start(weak_paged.clone());
+			// this is insta-rejected, no need to proceed any more blocks.
+
+			assert_eq!(
+				verifier_events(),
+				vec![
+					Event::Verified(2, 2),
+					Event::Verified(1, 2),
+					Event::Verified(0, 2),
+					Event::Queued(paged.score, None),
+					Event::VerificationFailed(2, FeasibilityError::ScoreTooLow)
+				]
+			);
+
+			assert_eq!(
+				MockSignedResults::get(),
+				vec![VerificationResult::Queued, VerificationResult::Rejected]
+			);
+		})
+	}
+
+	#[test]
+	fn better_valid_solution_replaces() {
+		ExtBuilder::verifier().build_and_execute(|| {
+			roll_to_snapshot_created();
+
+			// a weak one, which we will still accept.
+			let weak_page_partial = solution_from_supports(
+				vec![
+					(10, Support { total: 10, voters: vec![(1, 10)] }),
+					(20, Support { total: 10, voters: vec![(4, 10)] }),
+				],
+				2,
+			);
+			let weak_paged = PagedRawSolution::<Runtime> {
+				solution_pages: bounded_vec![weak_page_partial],
+				score: ElectionScore { minimal_stake: 10, sum_stake: 20, sum_stake_squared: 200 },
+				..Default::default()
+			};
+
+			load_mock_signed_and_start(weak_paged.clone());
+			let _ = roll_to_full_verification();
+
+			assert_eq!(
+				verifier_events(),
+				vec![
+					Event::Verified(2, 2),
+					Event::Verified(1, 0), // note: partial solution!
+					Event::Verified(0, 0), // note: partial solution!
+					Event::Queued(weak_paged.score, None)
+				]
+			);
+			assert_eq!(MockSignedResults::get(), vec![VerificationResult::Queued]);
+
+			let paged = mine_full_solution().unwrap();
+			load_mock_signed_and_start(paged.clone());
+			let _ = roll_to_full_verification();
+
+			assert_eq!(
+				verifier_events(),
+				vec![
+					Event::Verified(2, 2),
+					Event::Verified(1, 0),
+					Event::Verified(0, 0),
+					Event::Queued(weak_paged.score, None),
+					Event::Verified(2, 2),
+					Event::Verified(1, 2),
+					Event::Verified(0, 2),
+					Event::Queued(paged.score, Some(weak_paged.score))
+				]
+			);
+			assert_eq!(
+				MockSignedResults::get(),
+				vec![VerificationResult::Queued, VerificationResult::Queued]
+			);
+		})
+	}
+
+	#[test]
+	fn invalid_solution_bad_score() {
+		ExtBuilder::verifier().build_and_execute(|| {
+			roll_to_snapshot_created();
+			let mut paged = mine_full_solution().unwrap();
+
+			// just tweak score.
+			paged.score.minimal_stake += 1;
+			assert!(<VerifierPallet as Verifier>::queued_score().is_none());
+
+			load_mock_signed_and_start(paged);
+			roll_to_full_verification();
+
+			// nothing is verified.
+			assert!(<VerifierPallet as Verifier>::queued_score().is_none());
+			assert_eq!(
+				verifier_events(),
+				vec![
+					Event::<Runtime>::Verified(2, 2),
+					Event::<Runtime>::Verified(1, 2),
+					Event::<Runtime>::Verified(0, 2),
+					Event::<Runtime>::VerificationFailed(0, FeasibilityError::InvalidScore)
+				]
+			);
+
+			assert_eq!(MockSignedResults::get(), vec![VerificationResult::Rejected]);
+		})
+	}
+
+	#[test]
+	fn invalid_solution_bad_minimum_score() {
+		ExtBuilder::verifier().build_and_execute(|| {
+			roll_to_snapshot_created();
+			let paged = mine_full_solution().unwrap();
+
+			// our minimum score is our score, just a bit better.
+			let mut better_score = paged.score;
+			better_score.minimal_stake += 1;
+			<VerifierPallet as Verifier>::set_minimum_score(better_score);
+
+			load_mock_signed_and_start(paged);
+
+			// note that we don't need to call to `roll_to_full_verification`, since this solution
+			// is pretty much insta-rejected;
+			assert_eq!(
+				verifier_events(),
+				vec![Event::<Runtime>::VerificationFailed(2, FeasibilityError::ScoreTooLow)]
+			);
+
+			// nothing is verified..
+			assert!(<VerifierPallet as Verifier>::queued_score().is_none());
+
+			// result is reported back.
+			assert_eq!(MockSignedResults::get(), vec![VerificationResult::Rejected]);
+		})
+	}
+
+	#[test]
+	fn invalid_solution_bad_desired_targets() {
+		ExtBuilder::verifier().build_and_execute(|| {
+			roll_to_snapshot_created();
+			assert_eq!(crate::Snapshot::<Runtime>::desired_targets().unwrap(), 2);
+			let paged = mine_full_solution().unwrap();
+
+			// tweak this, for whatever reason.
+			crate::Snapshot::<Runtime>::set_desired_targets(3);
+
+			load_mock_signed_and_start(paged);
+			roll_to_full_verification();
+
+			// we detect this only in the last page.
+			assert_eq!(
+				verifier_events(),
+				vec![
+					Event::Verified(2, 2),
+					Event::Verified(1, 2),
+					Event::Verified(0, 2),
+					Event::VerificationFailed(0, FeasibilityError::WrongWinnerCount)
+				]
+			);
+
+			// nothing is verified..
+			assert!(<VerifierPallet as Verifier>::queued_score().is_none());
+			// result is reported back.
+			assert_eq!(MockSignedResults::get(), vec![VerificationResult::Rejected]);
+		})
+	}
+
+	#[test]
+	fn invalid_solution_bad_bounds_per_page() {
+		ExtBuilder::verifier()
+			.desired_targets(1)
+			.max_backers_per_winner(1) // in each page we allow 1 backer to be presented.
+			.max_backers_per_winner_final(12)
+			.build_and_execute(|| {
+				roll_to_snapshot_created();
+
+				// This is a sneaky custom solution where it will fail in the second page.
+				let page0 = solution_from_supports(
+					vec![(10, Support { total: 10, voters: vec![(1, 10)] })],
+					2,
+				);
+				let page1 = solution_from_supports(
+					vec![(10, Support { total: 20, voters: vec![(5, 10), (8, 10)] })],
+					1,
+				);
+				let page2 = solution_from_supports(
+					vec![(10, Support { total: 10, voters: vec![(10, 10)] })],
+					0,
+				);
+				let paged = PagedRawSolution {
+					solution_pages: bounded_vec![page0, page1, page2],
+					score: Default::default(), // score is never checked, so nada
+					..Default::default()
+				};
+
+				load_mock_signed_and_start(paged);
+				roll_to_full_verification();
+
+				// we detect the bound issue in page 1 (page 2 itself verified fine).
+				assert_eq!(
+					verifier_events(),
+					vec![
+						Event::Verified(2, 1),
+						Event::VerificationFailed(1, FeasibilityError::FailedToBoundSupport)
+					]
+				);
+
+				// our state is fully cleaned.
+				QueuedSolution::<Runtime>::assert_killed();
+				assert_eq!(StatusStorage::<Runtime>::get(), Status::Nothing);
+				// nothing is verified..
+				assert!(<VerifierPallet as Verifier>::queued_score().is_none());
+				// result is reported back.
+				assert_eq!(MockSignedResults::get(), vec![VerificationResult::Rejected]);
+			})
+	}
+
+	#[test]
+	fn invalid_solution_bad_bounds_final() {
+		ExtBuilder::verifier()
+			.desired_targets(1)
+			.max_backers_per_winner_final(2)
+			.build_and_execute(|| {
+				roll_to_snapshot_created();
+
+				// This is a sneaky custom solution where in each page 10 has 1 backer, so only in
+				// the last page can we catch the violation of the final bound.
+				let page0 = solution_from_supports(
+					vec![(10, Support { total: 10, voters: vec![(1, 10)] })],
+					2,
+				);
+				let page1 = solution_from_supports(
+					vec![(10, Support { total: 10, voters: vec![(5, 10)] })],
+					1,
+				);
+				let page2 = solution_from_supports(
+					vec![(10, Support { total: 10, voters: vec![(10, 10)] })],
+					0,
+				);
+				let paged = PagedRawSolution {
+					solution_pages: bounded_vec![page0, page1, page2],
+					score: ElectionScore {
+						minimal_stake: 30,
+						sum_stake: 30,
+						sum_stake_squared: 900,
+					},
+					..Default::default()
+				};
+
+				load_mock_signed_and_start(paged);
+				roll_to_full_verification();
+
+				// we detect this only in the last page.
+				assert_eq!(
+					verifier_events(),
+					vec![
+						Event::Verified(2, 1),
+						Event::Verified(1, 1),
+						Event::Verified(0, 1),
+						Event::VerificationFailed(0, FeasibilityError::FailedToBoundSupport)
+					]
+				);
+
+				// our state is fully cleaned.
+				QueuedSolution::<Runtime>::assert_killed();
+				assert_eq!(StatusStorage::<Runtime>::get(), Status::Nothing);
+
+				// nothing is verified..
+				assert!(<VerifierPallet as Verifier>::queued_score().is_none());
+				// result is reported back.
+				assert_eq!(MockSignedResults::get(), vec![VerificationResult::Rejected]);
+			})
+	}
+
+	#[test]
+	fn invalid_solution_does_not_alter_queue() {
+		ExtBuilder::verifier().build_and_execute(|| {
+			roll_to_snapshot_created();
+			let mut paged = mine_full_solution().unwrap();
+			let correct_score = paged.score;
+
+			assert!(<VerifierPallet as Verifier>::queued_score().is_none());
+
+			load_mock_signed_and_start(paged.clone());
+			roll_to_full_verification();
+
+			assert_eq!(<VerifierPallet as Verifier>::queued_score(), Some(correct_score));
+			assert!(QueuedSolution::<Runtime>::invalid_iter().count().is_zero());
+			assert!(QueuedSolution::<Runtime>::backing_iter().count().is_zero());
+
+			// just tweak score. Note that we tweak for a higher score, so the verifier will accept
+			// it.
+			paged.score.minimal_stake += 1;
+			load_mock_signed_and_start(paged.clone());
+			roll_to_full_verification();
+
+			// nothing is verified.
+			assert_eq!(<VerifierPallet as Verifier>::queued_score(), Some(correct_score));
+			assert_eq!(
+				verifier_events(),
+				vec![
+					Event::<Runtime>::Verified(2, 2),
+					Event::<Runtime>::Verified(1, 2),
+					Event::<Runtime>::Verified(0, 2),
+					Event::<Runtime>::Queued(correct_score, None),
+					Event::<Runtime>::Verified(2, 2),
+					Event::<Runtime>::Verified(1, 2),
+					Event::<Runtime>::Verified(0, 2),
+					Event::<Runtime>::VerificationFailed(0, FeasibilityError::InvalidScore),
+				]
+			);
+
+			// the verification results.
+			assert_eq!(
+				MockSignedResults::get(),
+				vec![VerificationResult::Queued, VerificationResult::Rejected]
+			);
+
+			// and the queue is still in good shape.
+			assert_eq!(<VerifierPallet as Verifier>::queued_score(), Some(correct_score));
+			assert!(QueuedSolution::<Runtime>::invalid_iter().count().is_zero());
+			assert!(QueuedSolution::<Runtime>::backing_iter().count().is_zero());
+		})
+	}
+}
+
+mod sync_verification {
+	use frame_election_provider_support::Support;
+	use sp_core::bounded_vec;
+	use sp_npos_elections::ElectionScore;
+	use sp_runtime::Perbill;
+
+	use crate::{
+		mock::{
+			fake_solution, mine_solution, roll_to_snapshot_created, solution_from_supports,
+			verifier_events, ExtBuilder, MaxBackersPerWinner, MaxWinnersPerPage, MultiBlock,
+			Runtime, VerifierPallet,
+		},
+		verifier::{Event, FeasibilityError, Verifier},
+		PagedRawSolution, Snapshot,
+	};
+
+	#[test]
+	fn basic_sync_verification_works() {
+		ExtBuilder::verifier().build_and_execute(|| {
+			roll_to_snapshot_created();
+			let single_page = mine_solution(1).unwrap();
+
+			assert_eq!(verifier_events(), vec![]);
+			assert_eq!(<VerifierPallet as Verifier>::queued_score(), None);
+
+			let _ = <VerifierPallet as Verifier>::verify_synchronous(
+				single_page.solution_pages.first().cloned().unwrap(),
+				single_page.score,
+				MultiBlock::msp(),
+			)
+			.unwrap();
+
+			assert_eq!(
+				verifier_events(),
+				vec![
+					Event::<Runtime>::Verified(2, 2),
+					Event::<Runtime>::Queued(single_page.score, None)
+				]
+			);
+			assert_eq!(<VerifierPallet as Verifier>::queued_score(), Some(single_page.score));
+		})
+	}
+
+	#[test]
+	fn winner_count_more() {
+		ExtBuilder::verifier().build_and_execute(|| {
+			roll_to_snapshot_created();
+			let single_page = mine_solution(1).unwrap();
+
+			// change the snapshot, as if the desired targets is now 1. This solution is then valid,
+			// but has too many.
+			Snapshot::<Runtime>::set_desired_targets(1);
+
+			assert_eq!(verifier_events(), vec![]);
+			assert_eq!(<VerifierPallet as Verifier>::queued_score(), None);
+
+			// note: this is NOT a storage_noop! because we do emit events.
+			assert_eq!(
+				<VerifierPallet as Verifier>::verify_synchronous(
+					single_page.solution_pages.first().cloned().unwrap(),
+					single_page.score,
+					MultiBlock::msp(),
+				)
+				.unwrap_err(),
+				FeasibilityError::WrongWinnerCount
+			);
+
+			assert_eq!(
+				verifier_events(),
+				vec![Event::<Runtime>::VerificationFailed(2, FeasibilityError::WrongWinnerCount)]
+			);
+			assert_eq!(<VerifierPallet as Verifier>::queued_score(), None);
+		})
+	}
+
+	#[test]
+	fn winner_count_less() {
+		ExtBuilder::verifier().build_and_execute(|| {
+			roll_to_snapshot_created();
+			let single_page = mine_solution(1).unwrap();
+
+			assert_eq!(verifier_events(), vec![]);
+			assert_eq!(<VerifierPallet as Verifier>::queued_score(), None);
+
+			// Valid solution, but has now too few.
+			Snapshot::<Runtime>::set_desired_targets(3);
+
+			assert_eq!(
+				<VerifierPallet as Verifier>::verify_synchronous(
+					single_page.solution_pages.first().cloned().unwrap(),
+					single_page.score,
+					MultiBlock::msp(),
+				)
+				.unwrap_err(),
+				FeasibilityError::WrongWinnerCount
+			);
+
+			assert_eq!(
+				verifier_events(),
+				vec![Event::<Runtime>::VerificationFailed(2, FeasibilityError::WrongWinnerCount)]
+			);
+			assert_eq!(<VerifierPallet as Verifier>::queued_score(), None);
+		})
+	}
+
+	#[test]
+	fn incorrect_score_is_rejected() {
+		ExtBuilder::verifier().build_and_execute(|| {
+			roll_to_snapshot_created();
+
+			let single_page = mine_solution(1).unwrap();
+			let mut score_incorrect = single_page.score;
+			score_incorrect.minimal_stake += 1;
+
+			assert_eq!(
+				<VerifierPallet as Verifier>::verify_synchronous(
+					single_page.solution_pages.first().cloned().unwrap(),
+					score_incorrect,
+					MultiBlock::msp(),
+				)
+				.unwrap_err(),
+				FeasibilityError::InvalidScore
+			);
+
+			assert_eq!(
+				verifier_events(),
+				vec![Event::<Runtime>::VerificationFailed(2, FeasibilityError::InvalidScore),]
+			);
+		})
+	}
+
+	#[test]
+	fn minimum_untrusted_score_is_rejected() {
+		ExtBuilder::verifier().build_and_execute(|| {
+			roll_to_snapshot_created();
+
+			let single_page = mine_solution(1).unwrap();
+
+			// raise the bar such that we don't meet it.
+			let mut unattainable_score = single_page.score;
+			unattainable_score.minimal_stake += 1;
+
+			<VerifierPallet as Verifier>::set_minimum_score(unattainable_score);
+
+			assert_eq!(
+				<VerifierPallet as Verifier>::verify_synchronous(
+					single_page.solution_pages.first().cloned().unwrap(),
+					single_page.score,
+					MultiBlock::msp(),
+				)
+				.unwrap_err(),
+				FeasibilityError::ScoreTooLow
+			);
+
+			assert_eq!(
+				verifier_events(),
+				vec![Event::<Runtime>::VerificationFailed(2, FeasibilityError::ScoreTooLow)]
+			);
+		})
+	}
+
+	#[test]
+	fn bad_bounds_rejected() {
+		// MaxBackersPerWinner.
+		ExtBuilder::verifier().build_and_execute(|| {
+			roll_to_snapshot_created();
+
+			let single_page = mine_solution(1).unwrap();
+			// note: change this after the miner is done, otherwise it is smart enough to trim.
+			MaxBackersPerWinner::set(1);
+
+			assert_eq!(
+				<VerifierPallet as Verifier>::verify_synchronous(
+					single_page.solution_pages.first().cloned().unwrap(),
+					single_page.score,
+					MultiBlock::msp(),
+				)
+				.unwrap_err(),
+				FeasibilityError::FailedToBoundSupport
+			);
+
+			assert_eq!(
+				verifier_events(),
+				vec![Event::<Runtime>::VerificationFailed(
+					2,
+					FeasibilityError::FailedToBoundSupport
+				)]
+			);
+		});
+
+		// MaxWinnersPerPage.
+		ExtBuilder::verifier().build_and_execute(|| {
+			roll_to_snapshot_created();
+
+			let single_page = mine_solution(1).unwrap();
+			// note: the miner does feasibility internally, change this parameter afterwards.
+			MaxWinnersPerPage::set(1);
+
+			assert_eq!(
+				<VerifierPallet as Verifier>::verify_synchronous(
+					single_page.solution_pages.first().cloned().unwrap(),
+					single_page.score,
+					MultiBlock::msp(),
+				)
+				.unwrap_err(),
+				FeasibilityError::FailedToBoundSupport
+			);
+
+			assert_eq!(
+				verifier_events(),
+				vec![Event::<Runtime>::VerificationFailed(
+					2,
+					FeasibilityError::FailedToBoundSupport
+				)]
+			);
+		});
+	}
+
+	#[test]
+	fn solution_improvement_threshold_respected() {
+		ExtBuilder::verifier()
+			.solution_improvement_threshold(Perbill::from_percent(10))
+			.build_and_execute(|| {
+				roll_to_snapshot_created();
+
+				// submit something good.
+				let single_page = mine_solution(1).unwrap();
+				let _ = <VerifierPallet as Verifier>::verify_synchronous(
+					single_page.solution_pages.first().cloned().unwrap(),
+					single_page.score,
+					MultiBlock::msp(),
+				)
+				.unwrap();
+
+				// the slightly better solution need not even be correct. We improve it by 5%, but
+				// we need 10%.
+				let mut better_score = single_page.score;
+				let improvement = Perbill::from_percent(5) * better_score.minimal_stake;
+				better_score.minimal_stake += improvement;
+				let slightly_better = fake_solution(better_score);
+
+				assert_eq!(
+					<VerifierPallet as Verifier>::verify_synchronous(
+						slightly_better.solution_pages.first().cloned().unwrap(),
+						slightly_better.score,
+						MultiBlock::msp(),
+					)
+					.unwrap_err(),
+					FeasibilityError::ScoreTooLow
+				);
+			});
+	}
+
+	#[test]
+	fn weak_score_is_insta_rejected() {
+		ExtBuilder::verifier().build_and_execute(|| {
+			roll_to_snapshot_created();
+
+			// queue something useful.
+			let single_page = mine_solution(1).unwrap();
+			let _ = <VerifierPallet as Verifier>::verify_synchronous(
+				single_page.solution_pages.first().cloned().unwrap(),
+				single_page.score,
+				MultiBlock::msp(),
+			)
+			.unwrap();
+			assert_eq!(<VerifierPallet as Verifier>::queued_score(), Some(single_page.score));
+
+			// now try and submit that's really weak. Doesn't even need to be valid, since the score
+			// is checked first.
+			let mut bad_score = single_page.score;
+			bad_score.minimal_stake -= 1;
+			let weak = fake_solution(bad_score);
+
+			assert_eq!(
+				<VerifierPallet as Verifier>::verify_synchronous(
+					weak.solution_pages.first().cloned().unwrap(),
+					weak.score,
+					MultiBlock::msp(),
+				)
+				.unwrap_err(),
+				FeasibilityError::ScoreTooLow
+			);
+
+			assert_eq!(
+				verifier_events(),
+				vec![
+					Event::<Runtime>::Verified(2, 2),
+					Event::<Runtime>::Queued(single_page.score, None),
+					Event::<Runtime>::VerificationFailed(2, FeasibilityError::ScoreTooLow),
+				]
+			);
+		})
+	}
+
+	#[test]
+	fn good_solution_replaces() {
+		ExtBuilder::verifier().build_and_execute(|| {
+			roll_to_snapshot_created();
+
+			let weak_solution = solution_from_supports(
+				vec![
+					(10, Support { total: 10, voters: vec![(1, 10)] }),
+					(20, Support { total: 10, voters: vec![(4, 10)] }),
+				],
+				2,
+			);
+
+			let weak_paged = PagedRawSolution::<Runtime> {
+				solution_pages: bounded_vec![weak_solution],
+				score: ElectionScore { minimal_stake: 10, sum_stake: 20, sum_stake_squared: 200 },
+				..Default::default()
+			};
+
+			let _ = <VerifierPallet as Verifier>::verify_synchronous(
+				weak_paged.solution_pages.first().cloned().unwrap(),
+				weak_paged.score,
+				MultiBlock::msp(),
+			)
+			.unwrap();
+			assert_eq!(<VerifierPallet as Verifier>::queued_score(), Some(weak_paged.score));
+
+			// now get a better solution.
+			let better = mine_solution(1).unwrap();
+
+			let _ = <VerifierPallet as Verifier>::verify_synchronous(
+				better.solution_pages.first().cloned().unwrap(),
+				better.score,
+				MultiBlock::msp(),
+			)
+			.unwrap();
+
+			assert_eq!(<VerifierPallet as Verifier>::queued_score(), Some(better.score));
+
+			assert_eq!(
+				verifier_events(),
+				vec![
+					Event::<Runtime>::Verified(2, 2),
+					Event::<Runtime>::Queued(weak_paged.score, None),
+					Event::<Runtime>::Verified(2, 2),
+					Event::<Runtime>::Queued(better.score, Some(weak_paged.score)),
+				]
+			);
+		})
+	}
+
+	#[test]
+	fn weak_valid_is_discarded() {
+		ExtBuilder::verifier().build_and_execute(|| {
+			roll_to_snapshot_created();
+
+			// first, submit something good
+			let better = mine_solution(1).unwrap();
+			let _ = <VerifierPallet as Verifier>::verify_synchronous(
+				better.solution_pages.first().cloned().unwrap(),
+				better.score,
+				MultiBlock::msp(),
+			)
+			.unwrap();
+			assert_eq!(<VerifierPallet as Verifier>::queued_score(), Some(better.score));
+
+			// then try with something weaker.
+			let weak_solution = solution_from_supports(
+				vec![
+					(10, Support { total: 10, voters: vec![(1, 10)] }),
+					(20, Support { total: 10, voters: vec![(4, 10)] }),
+				],
+				2,
+			);
+			let weak_paged = PagedRawSolution::<Runtime> {
+				solution_pages: bounded_vec![weak_solution],
+				score: ElectionScore { minimal_stake: 10, sum_stake: 20, sum_stake_squared: 200 },
+				..Default::default()
+			};
+
+			assert_eq!(
+				<VerifierPallet as Verifier>::verify_synchronous(
+					weak_paged.solution_pages.first().cloned().unwrap(),
+					weak_paged.score,
+					MultiBlock::msp(),
+				)
+				.unwrap_err(),
+				FeasibilityError::ScoreTooLow
+			);
+
+			// queued solution has not changed.
+			assert_eq!(<VerifierPallet as Verifier>::queued_score(), Some(better.score));
+
+			assert_eq!(
+				verifier_events(),
+				vec![
+					Event::<Runtime>::Verified(2, 2),
+					Event::<Runtime>::Queued(better.score, None),
+					Event::<Runtime>::VerificationFailed(2, FeasibilityError::ScoreTooLow),
+				]
+			);
+		})
+	}
+}
diff --git a/substrate/frame/election-provider-multi-block/src/weights/measured/mod.rs b/substrate/frame/election-provider-multi-block/src/weights/measured/mod.rs
new file mode 100644
index 00000000000..3050fc7e7f1
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/weights/measured/mod.rs
@@ -0,0 +1,21 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+pub mod pallet_election_provider_multi_block;
+pub mod pallet_election_provider_multi_block_signed;
+pub mod pallet_election_provider_multi_block_unsigned;
+pub mod pallet_election_provider_multi_block_verifier;
diff --git a/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block.rs b/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block.rs
new file mode 100644
index 00000000000..8e0d9cf1d16
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block.rs
@@ -0,0 +1,364 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for `pallet_election_provider_multi_block`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2025-02-13, STEPS: `2`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `toaster1`, CPU: `AMD Ryzen Threadripper 7980X 64-Cores`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`
+
+// Executed Command:
+// target/release/substrate-node
+// benchmark
+// pallet
+// --chain
+// dev
+// --pallet
+// pallet_election_provider_multi_block
+// --extrinsic
+// all
+// --steps
+// 2
+// --repeat
+// 3
+// --template
+// substrate/.maintain/frame-weight-template.hbs
+// --heap-pages
+// 65000
+// --default-pov-mode
+// measured
+// --output
+// ../measured
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+#![allow(dead_code)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use core::marker::PhantomData;
+
+/// Weight functions needed for `pallet_election_provider_multi_block`.
+pub trait WeightInfo {
+	fn on_initialize_nothing() -> Weight;
+	fn on_initialize_into_snapshot_msp() -> Weight;
+	fn on_initialize_into_snapshot_rest() -> Weight;
+	fn on_initialize_into_signed() -> Weight;
+	fn on_initialize_into_signed_validation() -> Weight;
+	fn on_initialize_into_unsigned() -> Weight;
+	fn manage() -> Weight;
+}
+
+/// Weights for `pallet_election_provider_multi_block` using the Substrate node and recommended hardware.
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	fn on_initialize_nothing() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `156`
+		//  Estimated: `1641`
+		// Minimum execution time: 9_254_000 picoseconds.
+		Weight::from_parts(10_145_000, 1641)
+			.saturating_add(T::DbWeight::get().reads(2_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:1)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `Staking::ValidatorCount` (r:1 w:0)
+	/// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `Staking::CounterForValidators` (r:1 w:0)
+	/// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `Staking::Validators` (r:1002 w:0)
+	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`)
+	/// Storage: `Staking::VoterSnapshotStatus` (r:1 w:1)
+	/// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `Measured`)
+	/// Storage: `VoterList::CounterForListNodes` (r:1 w:0)
+	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `VoterList::ListBags` (r:200 w:0)
+	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `Measured`)
+	/// Storage: `VoterList::ListNodes` (r:26001 w:0)
+	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `Measured`)
+	/// Storage: `Staking::Bonded` (r:703 w:0)
+	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `Measured`)
+	/// Storage: `Staking::Ledger` (r:703 w:0)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `Measured`)
+	/// Storage: `Staking::Nominators` (r:703 w:0)
+	/// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:0 w:1)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:0 w:1)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedTargetSnapshotHash` (r:0 w:1)
+	/// Proof: `MultiBlock::PagedTargetSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedTargetSnapshot` (r:0 w:1)
+	/// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:0 w:1)
+	/// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`)
+	/// Storage: `Staking::MinimumActiveStake` (r:0 w:1)
+	/// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`)
+	fn on_initialize_into_snapshot_msp() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `5151586`
+		//  Estimated: `69505051`
+		// Minimum execution time: 201_905_061_000 picoseconds.
+		Weight::from_parts(203_148_720_000, 69505051)
+			.saturating_add(T::DbWeight::get().reads(29318_u64))
+			.saturating_add(T::DbWeight::get().writes(8_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:1)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `Staking::VoterSnapshotStatus` (r:1 w:1)
+	/// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `Measured`)
+	/// Storage: `VoterList::CounterForListNodes` (r:1 w:0)
+	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `VoterList::ListNodes` (r:26001 w:0)
+	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `Measured`)
+	/// Storage: `Staking::Bonded` (r:704 w:0)
+	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `Measured`)
+	/// Storage: `Staking::Ledger` (r:704 w:0)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `Measured`)
+	/// Storage: `Staking::Nominators` (r:703 w:0)
+	/// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `Measured`)
+	/// Storage: `VoterList::ListBags` (r:200 w:0)
+	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `Measured`)
+	/// Storage: `Staking::Validators` (r:165 w:0)
+	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:0 w:1)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:0 w:1)
+	/// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`)
+	/// Storage: `Staking::MinimumActiveStake` (r:0 w:1)
+	/// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`)
+	fn on_initialize_into_snapshot_rest() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `5329975`
+		//  Estimated: `69683440`
+		// Minimum execution time: 195_257_628_000 picoseconds.
+		Weight::from_parts(195_317_909_000, 69683440)
+			.saturating_add(T::DbWeight::get().reads(28481_u64))
+			.saturating_add(T::DbWeight::get().writes(5_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:1)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	fn on_initialize_into_signed() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `340`
+		//  Estimated: `1825`
+		// Minimum execution time: 649_767_000 picoseconds.
+		Weight::from_parts(764_370_000, 1825)
+			.saturating_add(T::DbWeight::get().reads(2_u64))
+			.saturating_add(T::DbWeight::get().writes(1_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:1)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`)
+	fn on_initialize_into_signed_validation() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `340`
+		//  Estimated: `3805`
+		// Minimum execution time: 657_218_000 picoseconds.
+		Weight::from_parts(674_575_000, 3805)
+			.saturating_add(T::DbWeight::get().reads(4_u64))
+			.saturating_add(T::DbWeight::get().writes(1_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:1)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`)
+	fn on_initialize_into_unsigned() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `340`
+		//  Estimated: `1825`
+		// Minimum execution time: 866_827_000 picoseconds.
+		Weight::from_parts(890_863_000, 1825)
+			.saturating_add(T::DbWeight::get().reads(3_u64))
+			.saturating_add(T::DbWeight::get().writes(2_u64))
+	}
+	fn manage() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 140_000 picoseconds.
+		Weight::from_parts(170_000, 0)
+	}
+}
+
+// For backwards compatibility and tests.
+impl WeightInfo for () {
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	fn on_initialize_nothing() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `156`
+		//  Estimated: `1641`
+		// Minimum execution time: 9_254_000 picoseconds.
+		Weight::from_parts(10_145_000, 1641)
+			.saturating_add(RocksDbWeight::get().reads(2_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:1)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `Staking::ValidatorCount` (r:1 w:0)
+	/// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `Staking::CounterForValidators` (r:1 w:0)
+	/// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `Staking::Validators` (r:1002 w:0)
+	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`)
+	/// Storage: `Staking::VoterSnapshotStatus` (r:1 w:1)
+	/// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `Measured`)
+	/// Storage: `VoterList::CounterForListNodes` (r:1 w:0)
+	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `VoterList::ListBags` (r:200 w:0)
+	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `Measured`)
+	/// Storage: `VoterList::ListNodes` (r:26001 w:0)
+	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `Measured`)
+	/// Storage: `Staking::Bonded` (r:703 w:0)
+	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `Measured`)
+	/// Storage: `Staking::Ledger` (r:703 w:0)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `Measured`)
+	/// Storage: `Staking::Nominators` (r:703 w:0)
+	/// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:0 w:1)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:0 w:1)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedTargetSnapshotHash` (r:0 w:1)
+	/// Proof: `MultiBlock::PagedTargetSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedTargetSnapshot` (r:0 w:1)
+	/// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:0 w:1)
+	/// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`)
+	/// Storage: `Staking::MinimumActiveStake` (r:0 w:1)
+	/// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`)
+	fn on_initialize_into_snapshot_msp() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `5151586`
+		//  Estimated: `69505051`
+		// Minimum execution time: 201_905_061_000 picoseconds.
+		Weight::from_parts(203_148_720_000, 69505051)
+			.saturating_add(RocksDbWeight::get().reads(29318_u64))
+			.saturating_add(RocksDbWeight::get().writes(8_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:1)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `Staking::VoterSnapshotStatus` (r:1 w:1)
+	/// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `Measured`)
+	/// Storage: `VoterList::CounterForListNodes` (r:1 w:0)
+	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `VoterList::ListNodes` (r:26001 w:0)
+	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `Measured`)
+	/// Storage: `Staking::Bonded` (r:704 w:0)
+	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `Measured`)
+	/// Storage: `Staking::Ledger` (r:704 w:0)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `Measured`)
+	/// Storage: `Staking::Nominators` (r:703 w:0)
+	/// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `Measured`)
+	/// Storage: `VoterList::ListBags` (r:200 w:0)
+	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `Measured`)
+	/// Storage: `Staking::Validators` (r:165 w:0)
+	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:0 w:1)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:0 w:1)
+	/// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`)
+	/// Storage: `Staking::MinimumActiveStake` (r:0 w:1)
+	/// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`)
+	fn on_initialize_into_snapshot_rest() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `5329975`
+		//  Estimated: `69683440`
+		// Minimum execution time: 195_257_628_000 picoseconds.
+		Weight::from_parts(195_317_909_000, 69683440)
+			.saturating_add(RocksDbWeight::get().reads(28481_u64))
+			.saturating_add(RocksDbWeight::get().writes(5_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:1)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	fn on_initialize_into_signed() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `340`
+		//  Estimated: `1825`
+		// Minimum execution time: 649_767_000 picoseconds.
+		Weight::from_parts(764_370_000, 1825)
+			.saturating_add(RocksDbWeight::get().reads(2_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:1)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`)
+	fn on_initialize_into_signed_validation() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `340`
+		//  Estimated: `3805`
+		// Minimum execution time: 657_218_000 picoseconds.
+		Weight::from_parts(674_575_000, 3805)
+			.saturating_add(RocksDbWeight::get().reads(4_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:1)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`)
+	fn on_initialize_into_unsigned() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `340`
+		//  Estimated: `1825`
+		// Minimum execution time: 866_827_000 picoseconds.
+		Weight::from_parts(890_863_000, 1825)
+			.saturating_add(RocksDbWeight::get().reads(3_u64))
+			.saturating_add(RocksDbWeight::get().writes(2_u64))
+	}
+	fn manage() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 140_000 picoseconds.
+		Weight::from_parts(170_000, 0)
+	}
+}
diff --git a/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_signed.rs b/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_signed.rs
new file mode 100644
index 00000000000..3eb0e3ccd48
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_signed.rs
@@ -0,0 +1,272 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for `pallet_election_provider_multi_block::signed`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2025-02-13, STEPS: `2`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `toaster1`, CPU: `AMD Ryzen Threadripper 7980X 64-Cores`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`
+
+// Executed Command:
+// target/release/substrate-node
+// benchmark
+// pallet
+// --chain
+// dev
+// --pallet
+// pallet_election_provider_multi_block::signed
+// --extrinsic
+// all
+// --steps
+// 2
+// --repeat
+// 3
+// --template
+// substrate/.maintain/frame-weight-template.hbs
+// --heap-pages
+// 65000
+// --default-pov-mode
+// measured
+// --output
+// ../measured
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+#![allow(dead_code)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use core::marker::PhantomData;
+
+/// Weight functions needed for `pallet_election_provider_multi_block::signed`.
+pub trait WeightInfo {
+	fn register_not_full() -> Weight;
+	fn register_eject() -> Weight;
+	fn submit_page() -> Weight;
+	fn unset_page() -> Weight;
+	fn bail() -> Weight;
+}
+
+/// Weights for `pallet_election_provider_multi_block::signed` using the Substrate node and recommended hardware.
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:0 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`)
+	fn register_not_full() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `3043`
+		//  Estimated: `6508`
+		// Minimum execution time: 62_425_000 picoseconds.
+		Weight::from_parts(63_507_000, 6508)
+			.saturating_add(T::DbWeight::get().reads(4_u64))
+			.saturating_add(T::DbWeight::get().writes(3_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `Balances::Holds` (r:2 w:2)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:2)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`)
+	fn register_eject() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `7643`
+		//  Estimated: `87833`
+		// Minimum execution time: 148_826_000 picoseconds.
+		Weight::from_parts(155_275_000, 87833)
+			.saturating_add(T::DbWeight::get().reads(38_u64))
+			.saturating_add(T::DbWeight::get().writes(37_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`)
+	fn submit_page() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `3459`
+		//  Estimated: `6924`
+		// Minimum execution time: 697_450_000 picoseconds.
+		Weight::from_parts(762_938_000, 6924)
+			.saturating_add(T::DbWeight::get().reads(5_u64))
+			.saturating_add(T::DbWeight::get().writes(3_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`)
+	fn unset_page() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `4287`
+		//  Estimated: `7752`
+		// Minimum execution time: 681_035_000 picoseconds.
+		Weight::from_parts(711_671_000, 7752)
+			.saturating_add(T::DbWeight::get().reads(5_u64))
+			.saturating_add(T::DbWeight::get().writes(3_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`)
+	fn bail() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `4508`
+		//  Estimated: `84698`
+		// Minimum execution time: 117_619_000 picoseconds.
+		Weight::from_parts(118_169_000, 84698)
+			.saturating_add(T::DbWeight::get().reads(37_u64))
+			.saturating_add(T::DbWeight::get().writes(35_u64))
+	}
+}
+
+// For backwards compatibility and tests.
+//
+// NOTE(review): autogenerated weights — same measured values as the
+// `SubstrateWeight<T>` impl, with `RocksDbWeight` substituted for
+// `T::DbWeight` (spot-checked against `bail()` above). Do not hand-edit;
+// regenerate with the benchmark CLI instead.
+impl WeightInfo for () {
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:0 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`)
+	fn register_not_full() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `3043`
+		//  Estimated: `6508`
+		// Minimum execution time: 62_425_000 picoseconds.
+		Weight::from_parts(63_507_000, 6508)
+			.saturating_add(RocksDbWeight::get().reads(4_u64))
+			.saturating_add(RocksDbWeight::get().writes(3_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `Balances::Holds` (r:2 w:2)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:2)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`)
+	fn register_eject() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `7643`
+		//  Estimated: `87833`
+		// Minimum execution time: 148_826_000 picoseconds.
+		Weight::from_parts(155_275_000, 87833)
+			.saturating_add(RocksDbWeight::get().reads(38_u64))
+			.saturating_add(RocksDbWeight::get().writes(37_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`)
+	fn submit_page() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `3459`
+		//  Estimated: `6924`
+		// Minimum execution time: 697_450_000 picoseconds.
+		Weight::from_parts(762_938_000, 6924)
+			.saturating_add(RocksDbWeight::get().reads(5_u64))
+			.saturating_add(RocksDbWeight::get().writes(3_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`)
+	fn unset_page() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `4287`
+		//  Estimated: `7752`
+		// Minimum execution time: 681_035_000 picoseconds.
+		Weight::from_parts(711_671_000, 7752)
+			.saturating_add(RocksDbWeight::get().reads(5_u64))
+			.saturating_add(RocksDbWeight::get().writes(3_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`)
+	fn bail() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `4508`
+		//  Estimated: `84698`
+		// Minimum execution time: 117_619_000 picoseconds.
+		Weight::from_parts(118_169_000, 84698)
+			.saturating_add(RocksDbWeight::get().reads(37_u64))
+			.saturating_add(RocksDbWeight::get().writes(35_u64))
+	}
+}
diff --git a/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_unsigned.rs b/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_unsigned.rs
new file mode 100644
index 00000000000..3fbe8099f87
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_unsigned.rs
@@ -0,0 +1,153 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for `pallet_election_provider_multi_block::unsigned`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2025-02-13, STEPS: `2`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `toaster1`, CPU: `AMD Ryzen Threadripper 7980X 64-Cores`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`
+
+// Executed Command:
+// target/release/substrate-node
+// benchmark
+// pallet
+// --chain
+// dev
+// --pallet
+// pallet_election_provider_multi_block::unsigned
+// --extrinsic
+// all
+// --steps
+// 2
+// --repeat
+// 3
+// --template
+// substrate/.maintain/frame-weight-template.hbs
+// --heap-pages
+// 65000
+// --default-pov-mode
+// measured
+// --output
+// ../measured
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+#![allow(dead_code)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use core::marker::PhantomData;
+
+/// Weight functions needed for `pallet_election_provider_multi_block::unsigned`.
+pub trait WeightInfo {
+	/// Weight of the pre-dispatch validity check for an unsigned solution.
+	/// Read-only — the benchmarked storage access lists on the impls in this
+	/// file show no writes for this path.
+	fn validate_unsigned() -> Weight;
+	/// Weight of accepting an unsigned solution page: per the benchmarked
+	/// access lists it writes the queued solution score and one queued
+	/// solution page (`QueuedSolutionY`).
+	fn submit_unsigned() -> Weight;
+}
+
+/// Weights for `pallet_election_provider_multi_block::unsigned` using the Substrate node and recommended hardware.
+///
+/// NOTE(review): generated with `STEPS: 2`, `REPEAT: 3` (see file header) —
+/// low statistical confidence; regenerate with production settings before
+/// relying on these numbers.
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:1 w:0)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	fn validate_unsigned() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `364`
+		//  Estimated: `1849`
+		// Minimum execution time: 80_312_000 picoseconds.
+		Weight::from_parts(80_762_000, 1849)
+			.saturating_add(T::DbWeight::get().reads(5_u64))
+	}
+	/// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:1 w:0)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionY` (r:0 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionY` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`)
+	fn submit_unsigned() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `157641`
+		//  Estimated: `161106`
+		// Minimum execution time: 3_629_133_000 picoseconds.
+		Weight::from_parts(4_086_909_000, 161106)
+			.saturating_add(T::DbWeight::get().reads(6_u64))
+			.saturating_add(T::DbWeight::get().writes(2_u64))
+	}
+}
+
+// For backwards compatibility and tests.
+//
+// NOTE(review): autogenerated — identical measured values to the
+// `SubstrateWeight<T>` impl above, with `RocksDbWeight` substituted for
+// `T::DbWeight`. Do not hand-edit; regenerate with the benchmark CLI.
+impl WeightInfo for () {
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:1 w:0)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	fn validate_unsigned() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `364`
+		//  Estimated: `1849`
+		// Minimum execution time: 80_312_000 picoseconds.
+		Weight::from_parts(80_762_000, 1849)
+			.saturating_add(RocksDbWeight::get().reads(5_u64))
+	}
+	/// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:1 w:0)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionY` (r:0 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionY` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`)
+	fn submit_unsigned() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `157641`
+		//  Estimated: `161106`
+		// Minimum execution time: 3_629_133_000 picoseconds.
+		Weight::from_parts(4_086_909_000, 161106)
+			.saturating_add(RocksDbWeight::get().reads(6_u64))
+			.saturating_add(RocksDbWeight::get().writes(2_u64))
+	}
+}
diff --git a/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_verifier.rs b/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_verifier.rs
new file mode 100644
index 00000000000..cec05a6e08a
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_verifier.rs
@@ -0,0 +1,361 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for `pallet_election_provider_multi_block::verifier`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2025-02-13, STEPS: `2`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `toaster1`, CPU: `AMD Ryzen Threadripper 7980X 64-Cores`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`
+
+// Executed Command:
+// target/release/substrate-node
+// benchmark
+// pallet
+// --chain
+// dev
+// --pallet
+// pallet_election_provider_multi_block::verifier
+// --extrinsic
+// all
+// --steps
+// 2
+// --repeat
+// 3
+// --template
+// substrate/.maintain/frame-weight-template.hbs
+// --heap-pages
+// 65000
+// --default-pov-mode
+// measured
+// --output
+// ../measured
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+#![allow(dead_code)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use core::marker::PhantomData;
+
+/// Weight functions needed for `pallet_election_provider_multi_block::verifier`.
+pub trait WeightInfo {
+	/// `on_initialize` weight for the "valid, non-terminal" benchmark path
+	/// (per the access lists below: reads one submitted page, writes one
+	/// queued solution page and its backings).
+	fn on_initialize_valid_non_terminal() -> Weight;
+	/// `on_initialize` weight for the "valid, terminal" benchmark path
+	/// (touches all 32 submitted pages and queued backings).
+	fn on_initialize_valid_terminal() -> Weight;
+	/// `on_initialize` weight for the "invalid, terminal" benchmark path.
+	fn on_initialize_invalid_terminal() -> Weight;
+	/// `on_initialize` weight for the "invalid, non-terminal" path. `v` is
+	/// the benchmarked component over `[0, 31]`, scaling reads and writes by
+	/// 2 per unit — presumably the number of already-queued pages to clean
+	/// up; confirm against the benchmark definition.
+	fn on_initialize_invalid_non_terminal(v: u32, ) -> Weight;
+}
+
+/// Weights for `pallet_election_provider_multi_block::verifier` using the Substrate node and recommended hardware.
+///
+/// NOTE(review): generated with `STEPS: 2`, `REPEAT: 3` (see file header) —
+/// low statistical confidence; regenerate with production settings before
+/// relying on these numbers.
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:0)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:1 w:0)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:0 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`)
+	fn on_initialize_valid_non_terminal() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `160552`
+		//  Estimated: `164017`
+		// Minimum execution time: 917_013_000 picoseconds.
+		Weight::from_parts(919_406_000, 164017)
+			.saturating_add(T::DbWeight::get().reads(9_u64))
+			.saturating_add(T::DbWeight::get().writes(3_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:1 w:0)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:33 w:32)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`)
+	fn on_initialize_valid_terminal() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `881924`
+		//  Estimated: `964589`
+		// Minimum execution time: 1_932_757_000 picoseconds.
+		Weight::from_parts(1_961_530_000, 964589)
+			.saturating_add(T::DbWeight::get().reads(76_u64))
+			.saturating_add(T::DbWeight::get().writes(71_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:1 w:0)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:33 w:32)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:31 w:32)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`)
+	fn on_initialize_invalid_terminal() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `882945`
+		//  Estimated: `965610`
+		// Minimum execution time: 1_919_946_000 picoseconds.
+		Weight::from_parts(1_949_902_000, 965610)
+			.saturating_add(T::DbWeight::get().reads(106_u64))
+			.saturating_add(T::DbWeight::get().writes(100_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:1 w:0)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:31 w:31)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:31 w:31)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`)
+	/// The range of component `v` is `[0, 31]`.
+	fn on_initialize_invalid_non_terminal(v: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `164728 + v * (8538 ±0)`
+		//  Estimated: `244918 + v * (16343 ±0)`
+		// Minimum execution time: 572_970_000 picoseconds.
+		Weight::from_parts(886_325_333, 244918)
+			// Standard Error: 19_873_926
+			// NOTE(review): standard error is huge relative to the slope —
+			// artifact of STEPS=2 / REPEAT=3; treat the per-`v` term as rough.
+			.saturating_add(Weight::from_parts(27_871_795, 0).saturating_mul(v.into()))
+			.saturating_add(T::DbWeight::get().reads(42_u64))
+			.saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(v.into())))
+			.saturating_add(T::DbWeight::get().writes(36_u64))
+			.saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(v.into())))
+			.saturating_add(Weight::from_parts(0, 16343).saturating_mul(v.into()))
+	}
+}
+
+// For backwards compatibility and tests.
+impl WeightInfo for () {
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:0)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:1 w:0)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:0 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`)
+	fn on_initialize_valid_non_terminal() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `160552`
+		//  Estimated: `164017`
+		// Minimum execution time: 917_013_000 picoseconds.
+		Weight::from_parts(919_406_000, 164017)
+			.saturating_add(RocksDbWeight::get().reads(9_u64))
+			.saturating_add(RocksDbWeight::get().writes(3_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:1 w:0)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:33 w:32)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`)
+	fn on_initialize_valid_terminal() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `881924`
+		//  Estimated: `964589`
+		// Minimum execution time: 1_932_757_000 picoseconds.
+		Weight::from_parts(1_961_530_000, 964589)
+			.saturating_add(RocksDbWeight::get().reads(76_u64))
+			.saturating_add(RocksDbWeight::get().writes(71_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:1 w:0)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:33 w:32)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:31 w:32)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`)
+	fn on_initialize_invalid_terminal() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `882945`
+		//  Estimated: `965610`
+		// Minimum execution time: 1_919_946_000 picoseconds.
+		Weight::from_parts(1_949_902_000, 965610)
+			.saturating_add(RocksDbWeight::get().reads(106_u64))
+			.saturating_add(RocksDbWeight::get().writes(100_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:1 w:0)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:31 w:31)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:31 w:31)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`)
+	/// The range of component `v` is `[0, 31]`.
+	fn on_initialize_invalid_non_terminal(v: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `164728 + v * (8538 ±0)`
+		//  Estimated: `244918 + v * (16343 ±0)`
+		// Minimum execution time: 572_970_000 picoseconds.
+		Weight::from_parts(886_325_333, 244918)
+			// Standard Error: 19_873_926
+			.saturating_add(Weight::from_parts(27_871_795, 0).saturating_mul(v.into()))
+			.saturating_add(RocksDbWeight::get().reads(42_u64))
+			.saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(v.into())))
+			.saturating_add(RocksDbWeight::get().writes(36_u64))
+			.saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(v.into())))
+			.saturating_add(Weight::from_parts(0, 16343).saturating_mul(v.into()))
+	}
+}
diff --git a/substrate/frame/election-provider-multi-block/src/weights/mel/mod.rs b/substrate/frame/election-provider-multi-block/src/weights/mel/mod.rs
new file mode 100644
index 00000000000..3050fc7e7f1
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/weights/mel/mod.rs
@@ -0,0 +1,21 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+pub mod pallet_election_provider_multi_block;
+pub mod pallet_election_provider_multi_block_signed;
+pub mod pallet_election_provider_multi_block_unsigned;
+pub mod pallet_election_provider_multi_block_verifier;
diff --git a/substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block.rs b/substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block.rs
new file mode 100644
index 00000000000..25b97d446cf
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block.rs
@@ -0,0 +1,362 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for `pallet_election_provider_multi_block`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2025-02-13, STEPS: `2`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `toaster1`, CPU: `AMD Ryzen Threadripper 7980X 64-Cores`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`
+
+// Executed Command:
+// target/release/substrate-node
+// benchmark
+// pallet
+// --chain
+// dev
+// --pallet
+// pallet_election_provider_multi_block
+// --extrinsic
+// all
+// --steps
+// 2
+// --repeat
+// 3
+// --template
+// substrate/.maintain/frame-weight-template.hbs
+// --heap-pages
+// 65000
+// --output
+// ../mel
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+#![allow(dead_code)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use core::marker::PhantomData;
+
+/// Weight functions needed for `pallet_election_provider_multi_block`.
+pub trait WeightInfo {
+	fn on_initialize_nothing() -> Weight;
+	fn on_initialize_into_snapshot_msp() -> Weight;
+	fn on_initialize_into_snapshot_rest() -> Weight;
+	fn on_initialize_into_signed() -> Weight;
+	fn on_initialize_into_signed_validation() -> Weight;
+	fn on_initialize_into_unsigned() -> Weight;
+	fn manage() -> Weight;
+}
+
+/// Weights for `pallet_election_provider_multi_block` using the Substrate node and recommended hardware.
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	fn on_initialize_nothing() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `156`
+		//  Estimated: `1490`
+		// Minimum execution time: 9_425_000 picoseconds.
+		Weight::from_parts(9_514_000, 1490)
+			.saturating_add(T::DbWeight::get().reads(2_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:1)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::ValidatorCount` (r:1 w:0)
+	/// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::CounterForValidators` (r:1 w:0)
+	/// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Validators` (r:1002 w:0)
+	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VoterSnapshotStatus` (r:1 w:1)
+	/// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::CounterForListNodes` (r:1 w:0)
+	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListBags` (r:200 w:0)
+	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListNodes` (r:26001 w:0)
+	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Bonded` (r:703 w:0)
+	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Ledger` (r:703 w:0)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Nominators` (r:703 w:0)
+	/// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:0 w:1)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:0 w:1)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedTargetSnapshotHash` (r:0 w:1)
+	/// Proof: `MultiBlock::PagedTargetSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedTargetSnapshot` (r:0 w:1)
+	/// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:0 w:1)
+	/// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::MinimumActiveStake` (r:0 w:1)
+	/// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	fn on_initialize_into_snapshot_msp() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `5151586`
+		//  Estimated: `68357619`
+		// Minimum execution time: 205_124_352_000 picoseconds.
+		Weight::from_parts(206_087_996_000, 68357619)
+			.saturating_add(T::DbWeight::get().reads(29318_u64))
+			.saturating_add(T::DbWeight::get().writes(8_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:1)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VoterSnapshotStatus` (r:1 w:1)
+	/// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::CounterForListNodes` (r:1 w:0)
+	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListNodes` (r:26001 w:0)
+	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Bonded` (r:704 w:0)
+	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Ledger` (r:704 w:0)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Nominators` (r:703 w:0)
+	/// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListBags` (r:200 w:0)
+	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Validators` (r:165 w:0)
+	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:0 w:1)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:0 w:1)
+	/// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::MinimumActiveStake` (r:0 w:1)
+	/// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	fn on_initialize_into_snapshot_rest() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `5329975`
+		//  Estimated: `68357619`
+		// Minimum execution time: 197_146_155_000 picoseconds.
+		Weight::from_parts(198_376_173_000, 68357619)
+			.saturating_add(T::DbWeight::get().reads(28481_u64))
+			.saturating_add(T::DbWeight::get().writes(5_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:1)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	fn on_initialize_into_signed() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `340`
+		//  Estimated: `1490`
+		// Minimum execution time: 750_450_000 picoseconds.
+		Weight::from_parts(764_001_000, 1490)
+			.saturating_add(T::DbWeight::get().reads(2_u64))
+			.saturating_add(T::DbWeight::get().writes(1_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:1)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`)
+	fn on_initialize_into_signed_validation() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `340`
+		//  Estimated: `4118`
+		// Minimum execution time: 626_412_000 picoseconds.
+		Weight::from_parts(663_538_000, 4118)
+			.saturating_add(T::DbWeight::get().reads(4_u64))
+			.saturating_add(T::DbWeight::get().writes(1_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:1)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`)
+	fn on_initialize_into_unsigned() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `340`
+		//  Estimated: `1490`
+		// Minimum execution time: 734_786_000 picoseconds.
+		Weight::from_parts(882_059_000, 1490)
+			.saturating_add(T::DbWeight::get().reads(3_u64))
+			.saturating_add(T::DbWeight::get().writes(2_u64))
+	}
+	fn manage() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 141_000 picoseconds.
+		Weight::from_parts(150_000, 0)
+	}
+}
+
+// For backwards compatibility and tests.
+impl WeightInfo for () {
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	fn on_initialize_nothing() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `156`
+		//  Estimated: `1490`
+		// Minimum execution time: 9_425_000 picoseconds.
+		Weight::from_parts(9_514_000, 1490)
+			.saturating_add(RocksDbWeight::get().reads(2_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:1)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::ValidatorCount` (r:1 w:0)
+	/// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::CounterForValidators` (r:1 w:0)
+	/// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Validators` (r:1002 w:0)
+	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VoterSnapshotStatus` (r:1 w:1)
+	/// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::CounterForListNodes` (r:1 w:0)
+	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListBags` (r:200 w:0)
+	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListNodes` (r:26001 w:0)
+	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Bonded` (r:703 w:0)
+	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Ledger` (r:703 w:0)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Nominators` (r:703 w:0)
+	/// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:0 w:1)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:0 w:1)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedTargetSnapshotHash` (r:0 w:1)
+	/// Proof: `MultiBlock::PagedTargetSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedTargetSnapshot` (r:0 w:1)
+	/// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:0 w:1)
+	/// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::MinimumActiveStake` (r:0 w:1)
+	/// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	fn on_initialize_into_snapshot_msp() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `5151586`
+		//  Estimated: `68357619`
+		// Minimum execution time: 205_124_352_000 picoseconds.
+		Weight::from_parts(206_087_996_000, 68357619)
+			.saturating_add(RocksDbWeight::get().reads(29318_u64))
+			.saturating_add(RocksDbWeight::get().writes(8_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:1)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VoterSnapshotStatus` (r:1 w:1)
+	/// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::CounterForListNodes` (r:1 w:0)
+	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListNodes` (r:26001 w:0)
+	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Bonded` (r:704 w:0)
+	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Ledger` (r:704 w:0)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Nominators` (r:703 w:0)
+	/// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`)
+	/// Storage: `VoterList::ListBags` (r:200 w:0)
+	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Validators` (r:165 w:0)
+	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:0 w:1)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:0 w:1)
+	/// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::MinimumActiveStake` (r:0 w:1)
+	/// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	fn on_initialize_into_snapshot_rest() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `5329975`
+		//  Estimated: `68357619`
+		// Minimum execution time: 197_146_155_000 picoseconds.
+		Weight::from_parts(198_376_173_000, 68357619)
+			.saturating_add(RocksDbWeight::get().reads(28481_u64))
+			.saturating_add(RocksDbWeight::get().writes(5_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:1)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	fn on_initialize_into_signed() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `340`
+		//  Estimated: `1490`
+		// Minimum execution time: 750_450_000 picoseconds.
+		Weight::from_parts(764_001_000, 1490)
+			.saturating_add(RocksDbWeight::get().reads(2_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:1)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`)
+	fn on_initialize_into_signed_validation() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `340`
+		//  Estimated: `4118`
+		// Minimum execution time: 626_412_000 picoseconds.
+		Weight::from_parts(663_538_000, 4118)
+			.saturating_add(RocksDbWeight::get().reads(4_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:1)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`)
+	fn on_initialize_into_unsigned() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `340`
+		//  Estimated: `1490`
+		// Minimum execution time: 734_786_000 picoseconds.
+		Weight::from_parts(882_059_000, 1490)
+			.saturating_add(RocksDbWeight::get().reads(3_u64))
+			.saturating_add(RocksDbWeight::get().writes(2_u64))
+	}
+	fn manage() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 141_000 picoseconds.
+		Weight::from_parts(150_000, 0)
+	}
+}
diff --git a/substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block_signed.rs b/substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block_signed.rs
new file mode 100644
index 00000000000..98e238145ae
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block_signed.rs
@@ -0,0 +1,270 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for `pallet_election_provider_multi_block::signed`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2025-02-13, STEPS: `2`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `toaster1`, CPU: `AMD Ryzen Threadripper 7980X 64-Cores`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`
+
+// Executed Command:
+// target/release/substrate-node
+// benchmark
+// pallet
+// --chain
+// dev
+// --pallet
+// pallet_election_provider_multi_block::signed
+// --extrinsic
+// all
+// --steps
+// 2
+// --repeat
+// 3
+// --template
+// substrate/.maintain/frame-weight-template.hbs
+// --heap-pages
+// 65000
+// --output
+// ../mel
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+#![allow(dead_code)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use core::marker::PhantomData;
+
+/// Weight functions needed for `pallet_election_provider_multi_block::signed`.
+pub trait WeightInfo {
+	fn register_not_full() -> Weight;
+	fn register_eject() -> Weight;
+	fn submit_page() -> Weight;
+	fn unset_page() -> Weight;
+	fn bail() -> Weight;
+}
+
+/// Weights for `pallet_election_provider_multi_block::signed` using the Substrate node and recommended hardware.
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:0 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`)
+	fn register_not_full() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `3043`
+		//  Estimated: `4118`
+		// Minimum execution time: 60_863_000 picoseconds.
+		Weight::from_parts(61_264_000, 4118)
+			.saturating_add(T::DbWeight::get().reads(4_u64))
+			.saturating_add(T::DbWeight::get().writes(3_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:2 w:2)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:2)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`)
+	fn register_eject() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `7643`
+		//  Estimated: `1185054`
+		// Minimum execution time: 149_207_000 picoseconds.
+		Weight::from_parts(151_520_000, 1185054)
+			.saturating_add(T::DbWeight::get().reads(38_u64))
+			.saturating_add(T::DbWeight::get().writes(37_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`)
+	fn submit_page() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `3459`
+		//  Estimated: `37992`
+		// Minimum execution time: 707_404_000 picoseconds.
+		Weight::from_parts(752_393_000, 37992)
+			.saturating_add(T::DbWeight::get().reads(5_u64))
+			.saturating_add(T::DbWeight::get().writes(3_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`)
+	fn unset_page() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `4287`
+		//  Estimated: `37992`
+		// Minimum execution time: 716_769_000 picoseconds.
+		Weight::from_parts(761_406_000, 37992)
+			.saturating_add(T::DbWeight::get().reads(5_u64))
+			.saturating_add(T::DbWeight::get().writes(3_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`)
+	fn bail() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `4508`
+		//  Estimated: `1185054`
+		// Minimum execution time: 117_038_000 picoseconds.
+		Weight::from_parts(117_468_000, 1185054)
+			.saturating_add(T::DbWeight::get().reads(37_u64))
+			.saturating_add(T::DbWeight::get().writes(35_u64))
+	}
+}
+
+// For backwards compatibility and tests.
+impl WeightInfo for () {
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:0 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`)
+	fn register_not_full() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `3043`
+		//  Estimated: `4118`
+		// Minimum execution time: 60_863_000 picoseconds.
+		Weight::from_parts(61_264_000, 4118)
+			.saturating_add(RocksDbWeight::get().reads(4_u64))
+			.saturating_add(RocksDbWeight::get().writes(3_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:2 w:2)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:2)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`)
+	fn register_eject() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `7643`
+		//  Estimated: `1185054`
+		// Minimum execution time: 149_207_000 picoseconds.
+		Weight::from_parts(151_520_000, 1185054)
+			.saturating_add(RocksDbWeight::get().reads(38_u64))
+			.saturating_add(RocksDbWeight::get().writes(37_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`)
+	fn submit_page() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `3459`
+		//  Estimated: `37992`
+		// Minimum execution time: 707_404_000 picoseconds.
+		Weight::from_parts(752_393_000, 37992)
+			.saturating_add(RocksDbWeight::get().reads(5_u64))
+			.saturating_add(RocksDbWeight::get().writes(3_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`)
+	fn unset_page() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `4287`
+		//  Estimated: `37992`
+		// Minimum execution time: 716_769_000 picoseconds.
+		Weight::from_parts(761_406_000, 37992)
+			.saturating_add(RocksDbWeight::get().reads(5_u64))
+			.saturating_add(RocksDbWeight::get().writes(3_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`)
+	fn bail() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `4508`
+		//  Estimated: `1185054`
+		// Minimum execution time: 117_038_000 picoseconds.
+		Weight::from_parts(117_468_000, 1185054)
+			.saturating_add(RocksDbWeight::get().reads(37_u64))
+			.saturating_add(RocksDbWeight::get().writes(35_u64))
+	}
+}
diff --git a/substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block_unsigned.rs b/substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block_unsigned.rs
new file mode 100644
index 00000000000..7f05b13174a
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block_unsigned.rs
@@ -0,0 +1,151 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for `pallet_election_provider_multi_block::unsigned`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2025-02-13, STEPS: `2`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `toaster1`, CPU: `AMD Ryzen Threadripper 7980X 64-Cores`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`
+
+// Executed Command:
+// target/release/substrate-node
+// benchmark
+// pallet
+// --chain
+// dev
+// --pallet
+// pallet_election_provider_multi_block::unsigned
+// --extrinsic
+// all
+// --steps
+// 2
+// --repeat
+// 3
+// --template
+// substrate/.maintain/frame-weight-template.hbs
+// --heap-pages
+// 65000
+// --output
+// ../mel
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+#![allow(dead_code)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use core::marker::PhantomData;
+
+/// Weight functions needed for `pallet_election_provider_multi_block::unsigned`.
+pub trait WeightInfo {
+	fn validate_unsigned() -> Weight;
+	fn submit_unsigned() -> Weight;
+}
+
+/// Weights for `pallet_election_provider_multi_block::unsigned` using the Substrate node and recommended hardware.
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:1 w:0)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	fn validate_unsigned() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `364`
+		//  Estimated: `1533`
+		// Minimum execution time: 77_037_000 picoseconds.
+		Weight::from_parts(77_588_000, 1533)
+			.saturating_add(T::DbWeight::get().reads(5_u64))
+	}
+	/// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:1 w:0)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionY` (r:0 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionY` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `MaxEncodedLen`)
+	fn submit_unsigned() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `157641`
+		//  Estimated: `392238`
+		// Minimum execution time: 3_607_268_000 picoseconds.
+		Weight::from_parts(4_015_058_000, 392238)
+			.saturating_add(T::DbWeight::get().reads(6_u64))
+			.saturating_add(T::DbWeight::get().writes(2_u64))
+	}
+}
+
+// For backwards compatibility and tests.
+impl WeightInfo for () {
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:1 w:0)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	fn validate_unsigned() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `364`
+		//  Estimated: `1533`
+		// Minimum execution time: 77_037_000 picoseconds.
+		Weight::from_parts(77_588_000, 1533)
+			.saturating_add(RocksDbWeight::get().reads(5_u64))
+	}
+	/// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:1 w:0)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionY` (r:0 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionY` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `MaxEncodedLen`)
+	fn submit_unsigned() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `157641`
+		//  Estimated: `392238`
+		// Minimum execution time: 3_607_268_000 picoseconds.
+		Weight::from_parts(4_015_058_000, 392238)
+			.saturating_add(RocksDbWeight::get().reads(6_u64))
+			.saturating_add(RocksDbWeight::get().writes(2_u64))
+	}
+}
diff --git a/substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block_verifier.rs b/substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block_verifier.rs
new file mode 100644
index 00000000000..55d359f5c28
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block_verifier.rs
@@ -0,0 +1,359 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for `pallet_election_provider_multi_block::verifier`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2025-02-13, STEPS: `2`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `toaster1`, CPU: `AMD Ryzen Threadripper 7980X 64-Cores`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`
+
+// Executed Command:
+// target/release/substrate-node
+// benchmark
+// pallet
+// --chain
+// dev
+// --pallet
+// pallet_election_provider_multi_block::verifier
+// --extrinsic
+// all
+// --steps
+// 2
+// --repeat
+// 3
+// --template
+// substrate/.maintain/frame-weight-template.hbs
+// --heap-pages
+// 65000
+// --output
+// ../mel
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+#![allow(dead_code)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use core::marker::PhantomData;
+
+/// Weight functions needed for `pallet_election_provider_multi_block::verifier`.
+pub trait WeightInfo {
+	fn on_initialize_valid_non_terminal() -> Weight;
+	fn on_initialize_valid_terminal() -> Weight;
+	fn on_initialize_invalid_terminal() -> Weight;
+	fn on_initialize_invalid_non_terminal(v: u32, ) -> Weight;
+}
+
+/// Weights for `pallet_election_provider_multi_block::verifier` using the Substrate node and recommended hardware.
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:0)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:1 w:0)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:0 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `MaxEncodedLen`)
+	fn on_initialize_valid_non_terminal() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `160552`
+		//  Estimated: `392238`
+		// Minimum execution time: 881_299_000 picoseconds.
+		Weight::from_parts(1_161_243_000, 392238)
+			.saturating_add(T::DbWeight::get().reads(9_u64))
+			.saturating_add(T::DbWeight::get().writes(3_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:1 w:0)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:33 w:32)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `MaxEncodedLen`)
+	fn on_initialize_valid_terminal() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `881924`
+		//  Estimated: `1799127`
+		// Minimum execution time: 1_974_549_000 picoseconds.
+		Weight::from_parts(2_755_105_000, 1799127)
+			.saturating_add(T::DbWeight::get().reads(76_u64))
+			.saturating_add(T::DbWeight::get().writes(71_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:1 w:0)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:33 w:32)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:31 w:32)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `MaxEncodedLen`)
+	fn on_initialize_invalid_terminal() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `882945`
+		//  Estimated: `192092149`
+		// Minimum execution time: 1_982_131_000 picoseconds.
+		Weight::from_parts(1_994_790_000, 192092149)
+			.saturating_add(T::DbWeight::get().reads(106_u64))
+			.saturating_add(T::DbWeight::get().writes(100_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:1 w:0)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:31 w:31)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:31 w:31)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`)
+	/// The range of component `v` is `[0, 31]`.
+	fn on_initialize_invalid_non_terminal(v: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `164728 + v * (8538 ±0)`
+		//  Estimated: `1185054 + v * (6190080 ±0)`
+		// Minimum execution time: 574_462_000 picoseconds.
+		Weight::from_parts(575_951_333, 1185054)
+			// Standard Error: 975_598
+			.saturating_add(Weight::from_parts(9_099_741, 0).saturating_mul(v.into()))
+			.saturating_add(T::DbWeight::get().reads(42_u64))
+			.saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(v.into())))
+			.saturating_add(T::DbWeight::get().writes(36_u64))
+			.saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(v.into())))
+			.saturating_add(Weight::from_parts(0, 6190080).saturating_mul(v.into()))
+	}
+}
+
+// For backwards compatibility and tests.
+impl WeightInfo for () {
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:0)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:1 w:0)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:0 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `MaxEncodedLen`)
+	fn on_initialize_valid_non_terminal() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `160552`
+		//  Estimated: `392238`
+		// Minimum execution time: 881_299_000 picoseconds.
+		Weight::from_parts(1_161_243_000, 392238)
+			.saturating_add(RocksDbWeight::get().reads(9_u64))
+			.saturating_add(RocksDbWeight::get().writes(3_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:1 w:0)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:33 w:32)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `MaxEncodedLen`)
+	fn on_initialize_valid_terminal() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `881924`
+		//  Estimated: `1799127`
+		// Minimum execution time: 1_974_549_000 picoseconds.
+		Weight::from_parts(2_755_105_000, 1799127)
+			.saturating_add(RocksDbWeight::get().reads(76_u64))
+			.saturating_add(RocksDbWeight::get().writes(71_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:1 w:0)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:33 w:32)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:31 w:32)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `MaxEncodedLen`)
+	fn on_initialize_invalid_terminal() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `882945`
+		//  Estimated: `192092149`
+		// Minimum execution time: 1_982_131_000 picoseconds.
+		Weight::from_parts(1_994_790_000, 192092149)
+			.saturating_add(RocksDbWeight::get().reads(106_u64))
+			.saturating_add(RocksDbWeight::get().writes(100_u64))
+	}
+	/// Storage: `MultiBlock::CurrentPhase` (r:1 w:0)
+	/// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1)
+	/// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::Round` (r:1 w:0)
+	/// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32)
+	/// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0)
+	/// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlock::DesiredTargets` (r:1 w:0)
+	/// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0)
+	/// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:31 w:31)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:31 w:31)
+	/// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `MaxEncodedLen`)
+	/// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1)
+	/// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`)
+	/// The range of component `v` is `[0, 31]`.
+	fn on_initialize_invalid_non_terminal(v: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `164728 + v * (8538 ±0)`
+		//  Estimated: `1185054 + v * (6190080 ±0)`
+		// Minimum execution time: 574_462_000 picoseconds.
+		Weight::from_parts(575_951_333, 1185054)
+			// Standard Error: 975_598
+			.saturating_add(Weight::from_parts(9_099_741, 0).saturating_mul(v.into()))
+			.saturating_add(RocksDbWeight::get().reads(42_u64))
+			.saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(v.into())))
+			.saturating_add(RocksDbWeight::get().writes(36_u64))
+			.saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(v.into())))
+			.saturating_add(Weight::from_parts(0, 6190080).saturating_mul(v.into()))
+	}
+}
diff --git a/substrate/frame/election-provider-multi-block/src/weights/mod.rs b/substrate/frame/election-provider-multi-block/src/weights/mod.rs
new file mode 100644
index 00000000000..89b39600984
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/weights/mod.rs
@@ -0,0 +1,22 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#![allow(unused)]
+pub(crate) mod measured;
+pub(crate) mod mel;
+pub(crate) mod zero;
+pub use zero::AllZeroWeights;
diff --git a/substrate/frame/election-provider-multi-block/src/weights/zero.rs b/substrate/frame/election-provider-multi-block/src/weights/zero.rs
new file mode 100644
index 00000000000..38210adde7c
--- /dev/null
+++ b/substrate/frame/election-provider-multi-block/src/weights/zero.rs
@@ -0,0 +1,89 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! A set of zero weights for all benchmarks of this pallet to be temporarily used in testing
+//! runtimes while benchmarking is being finalized.
+
+/// A `WeightInfo` implementation with all zero weights.
+pub struct AllZeroWeights;
+use frame_support::weights::Weight;
+
+impl crate::WeightInfo for AllZeroWeights {
+	fn manage() -> Weight {
+		Default::default()
+	}
+	fn on_initialize_into_signed() -> Weight {
+		Default::default()
+	}
+	fn on_initialize_into_signed_validation() -> Weight {
+		Default::default()
+	}
+	fn on_initialize_into_snapshot_msp() -> Weight {
+		Default::default()
+	}
+	fn on_initialize_into_snapshot_rest() -> Weight {
+		Default::default()
+	}
+	fn on_initialize_into_unsigned() -> Weight {
+		Default::default()
+	}
+	fn on_initialize_nothing() -> Weight {
+		Default::default()
+	}
+}
+
+impl crate::signed::WeightInfo for AllZeroWeights {
+	fn bail() -> Weight {
+		Default::default()
+	}
+	fn register_eject() -> Weight {
+		Default::default()
+	}
+	fn register_not_full() -> Weight {
+		Default::default()
+	}
+	fn submit_page() -> Weight {
+		Default::default()
+	}
+	fn unset_page() -> Weight {
+		Default::default()
+	}
+}
+
+impl crate::unsigned::WeightInfo for AllZeroWeights {
+	fn submit_unsigned() -> Weight {
+		Default::default()
+	}
+	fn validate_unsigned() -> Weight {
+		Default::default()
+	}
+}
+
+impl crate::verifier::WeightInfo for AllZeroWeights {
+	fn on_initialize_invalid_non_terminal(_: u32) -> Weight {
+		Default::default()
+	}
+	fn on_initialize_invalid_terminal() -> Weight {
+		Default::default()
+	}
+	fn on_initialize_valid_non_terminal() -> Weight {
+		Default::default()
+	}
+	fn on_initialize_valid_terminal() -> Weight {
+		Default::default()
+	}
+}
diff --git a/substrate/frame/election-provider-multi-phase/src/benchmarking.rs b/substrate/frame/election-provider-multi-phase/src/benchmarking.rs
index 222e79ab99c..20984f11a44 100644
--- a/substrate/frame/election-provider-multi-phase/src/benchmarking.rs
+++ b/substrate/frame/election-provider-multi-phase/src/benchmarking.rs
@@ -197,6 +197,7 @@ mod benchmarks {
 
 	#[benchmark]
 	fn on_initialize_nothing() {
+		T::DataProvider::set_next_election(sp_runtime::traits::Bounded::max_value());
 		assert!(CurrentPhase::<T>::get().is_off());
 
 		#[block]
@@ -288,9 +289,11 @@ mod benchmarks {
 	) -> Result<(), BenchmarkError> {
 		// We don't directly need the data-provider to be populated, but it is just easy to use it.
 		set_up_data_provider::<T>(v, t);
-		// Default bounds are unbounded.
-		let targets = T::DataProvider::electable_targets(DataProviderBounds::default())?;
-		let voters = T::DataProvider::electing_voters(DataProviderBounds::default())?;
+		// Default bounds are unbounded.
+		let targets =
+			T::DataProvider::electable_targets(DataProviderBounds::default(), Zero::zero())?;
+		let voters = T::DataProvider::electing_voters(DataProviderBounds::default(), Zero::zero())?;
+
 		let desired_targets = T::DataProvider::desired_targets()?;
 		assert!(Snapshot::<T>::get().is_none());
 
@@ -300,8 +303,9 @@ mod benchmarks {
 		}
 
 		assert!(Snapshot::<T>::get().is_some());
-		assert_eq!(SnapshotMetadata::<T>::get().ok_or("metadata missing")?.voters, v);
-		assert_eq!(SnapshotMetadata::<T>::get().ok_or("metadata missing")?.targets, t);
+		// TODO: bring this back
+		// assert_eq!(SnapshotMetadata::<T>::get().ok_or("metadata missing")?.voters, v);
+		// assert_eq!(SnapshotMetadata::<T>::get().ok_or("metadata missing")?.targets, t);
 
 		Ok(())
 	}
@@ -343,7 +347,7 @@ mod benchmarks {
 
 		#[block]
 		{
-			result = <Pallet<T> as ElectionProvider>::elect();
+			result = <Pallet<T> as ElectionProvider>::elect(Zero::zero());
 		}
 
 		assert!(result.is_ok());
@@ -531,8 +535,9 @@ mod benchmarks {
 		}
 
 		assert!(Snapshot::<T>::get().is_some());
-		assert_eq!(SnapshotMetadata::<T>::get().ok_or("snapshot missing")?.voters, v);
-		assert_eq!(SnapshotMetadata::<T>::get().ok_or("snapshot missing")?.targets, t);
+		// TODO: bring this back
+		// assert_eq!(SnapshotMetadata::<T>::get().ok_or("snapshot missing")?.voters, v);
+		// assert_eq!(SnapshotMetadata::<T>::get().ok_or("snapshot missing")?.targets, t);
 
 		Ok(())
 	}
diff --git a/substrate/frame/election-provider-multi-phase/src/lib.rs b/substrate/frame/election-provider-multi-phase/src/lib.rs
index 06cb2963d76..3a5103d2bb8 100644
--- a/substrate/frame/election-provider-multi-phase/src/lib.rs
+++ b/substrate/frame/election-provider-multi-phase/src/lib.rs
@@ -189,6 +189,18 @@
 //! Note that there could be an overlap between these sub-errors. For example, A
 //! `SnapshotUnavailable` can happen in both miner and feasibility check phase.
 //!
+//! ## Multi-page election support
+//!
+//! The [`frame_election_provider_support::ElectionDataProvider`] and
+//! [`frame_election_provider_support::ElectionProvider`] traits used by this pallet can support a
+//! multi-page election.
+//!
+//! However, this pallet only supports single-page election and data
+//! provider and all the relevant trait implementation and configurations reflect that assumption.
+//!
+//! If external callers request the election of a page index higher than 0, the election will fail
+//! with [`ElectionError::MultiPageNotSupported`].
+//!
 //! ## Future Plans
 //!
 //! **Emergency-phase recovery script**: This script should be taken out of staking-miner in
@@ -234,14 +246,14 @@ extern crate alloc;
 use alloc::{boxed::Box, vec::Vec};
 use codec::{Decode, Encode};
 use frame_election_provider_support::{
-	bounds::{CountBound, ElectionBounds, ElectionBoundsBuilder, SizeBound},
-	BoundedSupportsOf, DataProviderBounds, ElectionDataProvider, ElectionProvider,
-	ElectionProviderBase, InstantElectionProvider, NposSolution,
+	bounds::{CountBound, ElectionBounds, SizeBound},
+	BoundedSupports, BoundedSupportsOf, ElectionDataProvider, ElectionProvider,
+	InstantElectionProvider, NposSolution, PageIndex,
 };
 use frame_support::{
 	dispatch::DispatchClass,
 	ensure,
-	traits::{Currency, DefensiveResult, Get, OnUnbalanced, ReservableCurrency},
+	traits::{Currency, Get, OnUnbalanced, ReservableCurrency},
 	weights::Weight,
 	DefaultNoBound, EqNoBound, PartialEqNoBound,
 };
@@ -251,7 +263,7 @@ use sp_arithmetic::{
 	traits::{CheckedAdd, Zero},
 	UpperOf,
 };
-use sp_npos_elections::{BoundedSupports, ElectionScore, IdentifierT, Supports, VoteWeight};
+use sp_npos_elections::{ElectionScore, IdentifierT, Supports, VoteWeight};
 use sp_runtime::{
 	transaction_validity::{
 		InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity,
@@ -270,6 +282,8 @@ mod mock;
 #[macro_use]
 pub mod helpers;
 
+/// This pallet only supports a single page election flow.
+pub(crate) const SINGLE_PAGE: u32 = 0;
 const LOG_TARGET: &str = "runtime::election-provider";
 
 pub mod migrations;
@@ -287,7 +301,6 @@ pub use weights::WeightInfo;
 
 /// The solution type used by this crate.
 pub type SolutionOf<T> = <T as MinerConfig>::Solution;
-
 /// The voter index. Derived from [`SolutionOf`].
 pub type SolutionVoterIndexOf<T> = <SolutionOf<T> as NposSolution>::VoterIndex;
 /// The target index. Derived from [`SolutionOf`].
@@ -295,8 +308,14 @@ pub type SolutionTargetIndexOf<T> = <SolutionOf<T> as NposSolution>::TargetIndex
 /// The accuracy of the election, when submitted from offchain. Derived from [`SolutionOf`].
 pub type SolutionAccuracyOf<T> =
 	<SolutionOf<<T as crate::Config>::MinerConfig> as NposSolution>::Accuracy;
+/// A ready solution parameterized with this pallet's miner config.
+pub type ReadySolutionOf<T> = ReadySolution<
+	<T as MinerConfig>::AccountId,
+	<T as MinerConfig>::MaxWinners,
+	<T as MinerConfig>::MaxBackersPerWinner,
+>;
 /// The fallback election type.
-pub type FallbackErrorOf<T> = <<T as crate::Config>::Fallback as ElectionProviderBase>::Error;
+pub type FallbackErrorOf<T> = <<T as crate::Config>::Fallback as ElectionProvider>::Error;
 
 /// Configuration for the benchmarks of the pallet.
 pub trait BenchmarkingConfig {
@@ -433,17 +452,18 @@ impl<C: Default> Default for RawSolution<C> {
 	DefaultNoBound,
 	scale_info::TypeInfo,
 )]
-#[scale_info(skip_type_params(AccountId, MaxWinners))]
-pub struct ReadySolution<AccountId, MaxWinners>
+#[scale_info(skip_type_params(AccountId, MaxWinners, MaxBackersPerWinner))]
+pub struct ReadySolution<AccountId, MaxWinners, MaxBackersPerWinner>
 where
 	AccountId: IdentifierT,
 	MaxWinners: Get<u32>,
+	MaxBackersPerWinner: Get<u32>,
 {
 	/// The final supports of the solution.
 	///
 	/// This is target-major vector, storing each winners, total backing, and each individual
 	/// backer.
-	pub supports: BoundedSupports<AccountId, MaxWinners>,
+	pub supports: BoundedSupports<AccountId, MaxWinners, MaxBackersPerWinner>,
 	/// The score of the solution.
 	///
 	/// This is needed to potentially challenge the solution.
@@ -494,13 +514,15 @@ pub enum ElectionError<T: Config> {
 	DataProvider(&'static str),
 	/// An error nested in the fallback.
 	Fallback(FallbackErrorOf<T>),
+	/// An error occurred when requesting an election result. The caller expects a multi-paged
+	/// election, which this pallet does not support.
+	MultiPageNotSupported,
 	/// No solution has been queued.
 	NothingQueued,
 }
 
 // NOTE: we have to do this manually because of the additional where clause needed on
 // `FallbackErrorOf<T>`.
-#[cfg(test)]
 impl<T: Config> PartialEq for ElectionError<T>
 where
 	FallbackErrorOf<T>: PartialEq,
@@ -512,6 +534,7 @@ where
 			(Miner(x), Miner(y)) if x == y => true,
 			(DataProvider(x), DataProvider(y)) if x == y => true,
 			(Fallback(x), Fallback(y)) if x == y => true,
+			(MultiPageNotSupported, MultiPageNotSupported) => true,
 			_ => false,
 		}
 	}
@@ -616,6 +639,7 @@ pub mod pallet {
 			AccountId = Self::AccountId,
 			MaxVotesPerVoter = <Self::DataProvider as ElectionDataProvider>::MaxVotesPerVoter,
 			MaxWinners = Self::MaxWinners,
+			MaxBackersPerWinner = Self::MaxBackersPerWinner,
 		>;
 
 		/// Maximum number of signed submissions that can be queued.
@@ -652,20 +676,23 @@ pub mod pallet {
 		#[pallet::constant]
 		type SignedDepositWeight: Get<BalanceOf<Self>>;
 
-		/// The maximum number of winners that can be elected by this `ElectionProvider`
-		/// implementation.
+		/// Maximum number of winners that an election supports.
 		///
 		/// Note: This must always be greater or equal to `T::DataProvider::desired_targets()`.
 		#[pallet::constant]
 		type MaxWinners: Get<u32>;
 
+		/// Maximum number of voters that can support a winner in an election solution.
+		///
+		/// This is needed to ensure election computation is bounded.
+		#[pallet::constant]
+		type MaxBackersPerWinner: Get<u32>;
+
 		/// Something that calculates the signed deposit base based on the signed submissions queue
 		/// size.
 		type SignedDepositBase: Convert<usize, BalanceOf<Self>>;
 
 		/// The maximum number of electing voters and electable targets to put in the snapshot.
-		/// At the moment, snapshots are only over a single block, but once multi-block elections
-		/// are introduced they will take place over multiple blocks.
 		type ElectionBounds: Get<ElectionBounds>;
 
 		/// Handler for the slashed deposits.
@@ -685,7 +712,8 @@ pub mod pallet {
 			AccountId = Self::AccountId,
 			BlockNumber = BlockNumberFor<Self>,
 			DataProvider = Self::DataProvider,
-			MaxWinners = Self::MaxWinners,
+			MaxBackersPerWinner = Self::MaxBackersPerWinner,
+			MaxWinnersPerPage = Self::MaxWinners,
 		>;
 
 		/// Configuration of the governance-only fallback.
@@ -696,7 +724,8 @@ pub mod pallet {
 			AccountId = Self::AccountId,
 			BlockNumber = BlockNumberFor<Self>,
 			DataProvider = Self::DataProvider,
-			MaxWinners = Self::MaxWinners,
+			MaxWinnersPerPage = Self::MaxWinners,
+			MaxBackersPerWinner = Self::MaxBackersPerWinner,
 		>;
 
 		/// OCW election solution miner algorithm implementation.
@@ -750,9 +779,10 @@ pub mod pallet {
 
 			log!(
 				trace,
-				"current phase {:?}, next election {:?}, metadata: {:?}",
+				"current phase {:?}, next election {:?}, queued? {:?}, metadata: {:?}",
 				current_phase,
 				next_election,
+				QueuedSolution::<T>::get().map(|rs| (rs.supports.len(), rs.compute, rs.score)),
 				SnapshotMetadata::<T>::get()
 			);
 			match current_phase {
@@ -978,8 +1008,9 @@ pub mod pallet {
 			T::ForceOrigin::ensure_origin(origin)?;
 			ensure!(CurrentPhase::<T>::get().is_emergency(), Error::<T>::CallNotAllowed);
 
-			// bound supports with T::MaxWinners
-			let supports = supports.try_into().map_err(|_| Error::<T>::TooManyWinners)?;
+			// bound supports with T::MaxWinners.
+			let supports: BoundedSupportsOf<Self> =
+				supports.try_into().map_err(|_| Error::<T>::TooManyWinners)?;
 
 			// Note: we don't `rotate_round` at this point; the next call to
 			// `ElectionProvider::elect` will succeed and take care of that.
@@ -1082,35 +1113,21 @@ pub mod pallet {
 		/// calling [`Call::set_emergency_election_result`].
 		#[pallet::call_index(4)]
 		#[pallet::weight(T::DbWeight::get().reads_writes(1, 1))]
-		pub fn governance_fallback(
-			origin: OriginFor<T>,
-			maybe_max_voters: Option<u32>,
-			maybe_max_targets: Option<u32>,
-		) -> DispatchResult {
+		pub fn governance_fallback(origin: OriginFor<T>) -> DispatchResult {
 			T::ForceOrigin::ensure_origin(origin)?;
 			ensure!(CurrentPhase::<T>::get().is_emergency(), Error::<T>::CallNotAllowed);
 
-			let election_bounds = ElectionBoundsBuilder::default()
-				.voters_count(maybe_max_voters.unwrap_or(u32::MAX).into())
-				.targets_count(maybe_max_targets.unwrap_or(u32::MAX).into())
-				.build();
+			let RoundSnapshot { voters, targets } =
+				Snapshot::<T>::get().ok_or(Error::<T>::MissingSnapshotMetadata)?;
+			let desired_targets =
+				DesiredTargets::<T>::get().ok_or(Error::<T>::MissingSnapshotMetadata)?;
 
-			let supports = T::GovernanceFallback::instant_elect(
-				election_bounds.voters,
-				election_bounds.targets,
-			)
-			.map_err(|e| {
+			let supports = T::GovernanceFallback::instant_elect(voters, targets, desired_targets)
+				.map_err(|e| {
 				log!(error, "GovernanceFallback failed: {:?}", e);
 				Error::<T>::FallbackFailed
 			})?;
 
-			// transform BoundedVec<_, T::GovernanceFallback::MaxWinners> into
-			// `BoundedVec<_, T::MaxWinners>`
-			let supports: BoundedVec<_, T::MaxWinners> = supports
-				.into_inner()
-				.try_into()
-				.defensive_map_err(|_| Error::<T>::BoundNotMet)?;
-
 			let solution = ReadySolution {
 				supports,
 				score: Default::default(),
@@ -1265,8 +1282,7 @@ pub mod pallet {
 	///
 	/// Always sorted by score.
 	#[pallet::storage]
-	pub type QueuedSolution<T: Config> =
-		StorageValue<_, ReadySolution<T::AccountId, T::MaxWinners>>;
+	pub type QueuedSolution<T: Config> = StorageValue<_, ReadySolutionOf<T::MinerConfig>>;
 
 	/// Snapshot data of the round.
 	///
@@ -1398,7 +1414,7 @@ impl<T: Config> Pallet<T> {
 	/// Current best solution, signed or unsigned, queued to be returned upon `elect`.
 	///
 	/// Always sorted by score.
-	pub fn queued_solution() -> Option<ReadySolution<T::AccountId, T::MaxWinners>> {
+	pub fn queued_solution() -> Option<ReadySolutionOf<T::MinerConfig>> {
 		QueuedSolution::<T>::get()
 	}
 
@@ -1504,11 +1520,12 @@ impl<T: Config> Pallet<T> {
 	/// Parts of [`create_snapshot`] that happen outside of this pallet.
 	///
 	/// Extracted for easier weight calculation.
+	///
+	/// Note: this pallet only supports one page of voter and target snapshots.
 	fn create_snapshot_external(
 	) -> Result<(Vec<T::AccountId>, Vec<VoterOf<T>>, u32), ElectionError<T>> {
 		let election_bounds = T::ElectionBounds::get();
-
-		let targets = T::DataProvider::electable_targets(election_bounds.targets)
+		let targets = T::DataProvider::electable_targets_stateless(election_bounds.targets)
 			.and_then(|t| {
 				election_bounds.ensure_targets_limits(
 					CountBound(t.len() as u32),
@@ -1518,7 +1535,7 @@ impl<T: Config> Pallet<T> {
 			})
 			.map_err(ElectionError::DataProvider)?;
 
-		let voters = T::DataProvider::electing_voters(election_bounds.voters)
+		let voters = T::DataProvider::electing_voters_stateless(election_bounds.voters)
 			.and_then(|v| {
 				election_bounds.ensure_voters_limits(
 					CountBound(v.len() as u32),
@@ -1528,7 +1545,7 @@ impl<T: Config> Pallet<T> {
 			})
 			.map_err(ElectionError::DataProvider)?;
 
-		let mut desired_targets = <Pallet<T> as ElectionProviderBase>::desired_targets_checked()
+		let mut desired_targets = <Pallet<T> as ElectionProvider>::desired_targets_checked()
 			.map_err(|e| ElectionError::DataProvider(e))?;
 
 		// If `desired_targets` > `targets.len()`, cap `desired_targets` to that level and emit a
@@ -1583,7 +1600,7 @@ impl<T: Config> Pallet<T> {
 	pub fn feasibility_check(
 		raw_solution: RawSolution<SolutionOf<T::MinerConfig>>,
 		compute: ElectionCompute,
-	) -> Result<ReadySolution<T::AccountId, T::MaxWinners>, FeasibilityError> {
+	) -> Result<ReadySolutionOf<T::MinerConfig>, FeasibilityError> {
 		let desired_targets =
 			DesiredTargets::<T>::get().ok_or(FeasibilityError::SnapshotUnavailable)?;
 
@@ -1630,40 +1647,42 @@ impl<T: Config> Pallet<T> {
 		QueuedSolution::<T>::take()
 			.ok_or(ElectionError::<T>::NothingQueued)
 			.or_else(|_| {
-				// default data provider bounds are unbounded. calling `instant_elect` with
-				// unbounded data provider bounds means that the on-chain `T:Bounds` configs will
-				// *not* be overwritten.
-				T::Fallback::instant_elect(
-					DataProviderBounds::default(),
-					DataProviderBounds::default(),
-				)
-				.map_err(|fe| ElectionError::Fallback(fe))
-				.and_then(|supports| {
-					Ok(ReadySolution {
-						supports,
-						score: Default::default(),
-						compute: ElectionCompute::Fallback,
+				log!(warn, "No solution queued, falling back to instant fallback.",);
+				let (voters, targets, desired_targets) = if T::Fallback::bother() {
+					let RoundSnapshot { voters, targets } = Snapshot::<T>::get().ok_or(
+						ElectionError::<T>::Feasibility(FeasibilityError::SnapshotUnavailable),
+					)?;
+					let desired_targets = DesiredTargets::<T>::get().ok_or(
+						ElectionError::<T>::Feasibility(FeasibilityError::SnapshotUnavailable),
+					)?;
+					(voters, targets, desired_targets)
+				} else {
+					(Default::default(), Default::default(), Default::default())
+				};
+				T::Fallback::instant_elect(voters, targets, desired_targets)
+					.map_err(|fe| ElectionError::Fallback(fe))
+					.and_then(|supports| {
+						Ok(ReadySolution {
+							supports,
+							score: Default::default(),
+							compute: ElectionCompute::Fallback,
+						})
 					})
-				})
 			})
 			.map(|ReadySolution { compute, score, supports }| {
 				Self::deposit_event(Event::ElectionFinalized { compute, score });
-				if Round::<T>::get() != 1 {
-					log!(info, "Finalized election round with compute {:?}.", compute);
-				}
+				log!(info, "Finalized election round with compute {:?}.", compute);
 				supports
 			})
 			.map_err(|err| {
 				Self::deposit_event(Event::ElectionFailed);
-				if Round::<T>::get() != 1 {
-					log!(warn, "Failed to finalize election round. reason {:?}", err);
-				}
+				log!(warn, "Failed to finalize election round. reason {:?}", err);
 				err
 			})
 	}
 
 	/// record the weight of the given `supports`.
-	fn weigh_supports(supports: &Supports<T::AccountId>) {
+	fn weigh_supports(supports: &BoundedSupportsOf<Self>) {
 		let active_voters = supports
 			.iter()
 			.map(|(_, x)| x)
@@ -1755,35 +1774,41 @@ impl<T: Config> Pallet<T> {
 	}
 }
 
-impl<T: Config> ElectionProviderBase for Pallet<T> {
+impl<T: Config> ElectionProvider for Pallet<T> {
 	type AccountId = T::AccountId;
 	type BlockNumber = BlockNumberFor<T>;
 	type Error = ElectionError<T>;
-	type MaxWinners = T::MaxWinners;
+	type MaxWinnersPerPage = T::MaxWinners;
+	type MaxBackersPerWinner = T::MaxBackersPerWinner;
+	type Pages = sp_core::ConstU32<1>;
 	type DataProvider = T::DataProvider;
-}
 
-impl<T: Config> ElectionProvider for Pallet<T> {
-	fn ongoing() -> bool {
-		match CurrentPhase::<T>::get() {
-			Phase::Off => false,
-			_ => true,
-		}
-	}
+	fn elect(page: PageIndex) -> Result<BoundedSupportsOf<Self>, Self::Error> {
+		// Note: this pallet **MUST** only be used in the single-page mode.
+		ensure!(page == SINGLE_PAGE, ElectionError::<T>::MultiPageNotSupported);
 
-	fn elect() -> Result<BoundedSupportsOf<Self>, Self::Error> {
-		match Self::do_elect() {
-			Ok(supports) => {
+		let res = match Self::do_elect() {
+			Ok(bounded_supports) => {
 				// All went okay, record the weight, put sign to be Off, clean snapshot, etc.
-				Self::weigh_supports(&supports);
+				Self::weigh_supports(&bounded_supports);
 				Self::rotate_round();
-				Ok(supports)
+				Ok(bounded_supports)
 			},
 			Err(why) => {
 				log!(error, "Entering emergency mode: {:?}", why);
 				Self::phase_transition(Phase::Emergency);
 				Err(why)
 			},
+		};
+
+		log!(info, "ElectionProvider::elect({}) => {:?}", page, res.as_ref().map(|s| s.len()));
+		res
+	}
+
+	fn ongoing() -> bool {
+		match CurrentPhase::<T>::get() {
+			Phase::Off => false,
+			_ => true,
 		}
 	}
 }
@@ -1803,7 +1828,6 @@ mod feasibility_check {
 	//! All of the tests here should be dedicated to only testing the feasibility check and nothing
 	//! more. The best way to audit and review these tests is to try and come up with a solution
 	//! that is invalid, but gets through the system as valid.
-
 	use super::*;
 	use crate::mock::{
 		raw_solution, roll_to, EpochLength, ExtBuilder, MultiPhase, Runtime, SignedPhase,
@@ -2007,6 +2031,7 @@ mod tests {
 		},
 		Phase,
 	};
+	use frame_election_provider_support::bounds::ElectionBoundsBuilder;
 	use frame_support::{assert_noop, assert_ok};
 	use sp_npos_elections::{BalancingConfig, Support};
 
@@ -2068,7 +2093,7 @@ mod tests {
 			assert_eq!(CurrentPhase::<Runtime>::get(), Phase::Unsigned((true, 25)));
 			assert!(Snapshot::<Runtime>::get().is_some());
 
-			assert_ok!(MultiPhase::elect());
+			assert_ok!(MultiPhase::elect(SINGLE_PAGE));
 
 			assert!(CurrentPhase::<Runtime>::get().is_off());
 			assert!(Snapshot::<Runtime>::get().is_none());
@@ -2132,7 +2157,7 @@ mod tests {
 			roll_to(30);
 			assert!(CurrentPhase::<Runtime>::get().is_unsigned_open_at(20));
 
-			assert_ok!(MultiPhase::elect());
+			assert_ok!(MultiPhase::elect(SINGLE_PAGE));
 
 			assert!(CurrentPhase::<Runtime>::get().is_off());
 			assert!(Snapshot::<Runtime>::get().is_none());
@@ -2179,7 +2204,7 @@ mod tests {
 			roll_to(30);
 			assert!(CurrentPhase::<Runtime>::get().is_signed());
 
-			assert_ok!(MultiPhase::elect());
+			assert_ok!(MultiPhase::elect(SINGLE_PAGE));
 
 			assert!(CurrentPhase::<Runtime>::get().is_off());
 			assert!(Snapshot::<Runtime>::get().is_none());
@@ -2217,23 +2242,20 @@ mod tests {
 			roll_to(30);
 			assert!(CurrentPhase::<Runtime>::get().is_off());
 
-			// This module is now only capable of doing on-chain backup.
-			assert_ok!(MultiPhase::elect());
+			// This module now cannot even do onchain fallback, as no snapshot is there
+			assert_eq!(
+				MultiPhase::elect(SINGLE_PAGE),
+				Err(ElectionError::<Runtime>::Feasibility(FeasibilityError::SnapshotUnavailable))
+			);
 
-			assert!(CurrentPhase::<Runtime>::get().is_off());
+			// this puts us in emergency now.
+			assert!(CurrentPhase::<Runtime>::get().is_emergency());
 
 			assert_eq!(
 				multi_phase_events(),
 				vec![
-					Event::ElectionFinalized {
-						compute: ElectionCompute::Fallback,
-						score: ElectionScore {
-							minimal_stake: 0,
-							sum_stake: 0,
-							sum_stake_squared: 0
-						}
-					},
-					Event::PhaseTransitioned { from: Phase::Off, to: Phase::Off, round: 2 },
+					Event::ElectionFailed,
+					Event::PhaseTransitioned { from: Phase::Off, to: Phase::Emergency, round: 1 }
 				]
 			);
 		});
@@ -2254,7 +2276,7 @@ mod tests {
 			assert_eq!(Round::<Runtime>::get(), 1);
 
 			// An unexpected call to elect.
-			assert_ok!(MultiPhase::elect());
+			assert_ok!(MultiPhase::elect(SINGLE_PAGE));
 
 			// We surely can't have any feasible solutions. This will cause an on-chain election.
 			assert_eq!(
@@ -2305,7 +2327,7 @@ mod tests {
 			}
 
 			// an unexpected call to elect.
-			assert_ok!(MultiPhase::elect());
+			assert_ok!(MultiPhase::elect(SINGLE_PAGE));
 
 			// all storage items must be cleared.
 			assert_eq!(Round::<Runtime>::get(), 2);
@@ -2376,7 +2398,7 @@ mod tests {
 			));
 
 			roll_to(30);
-			assert_ok!(MultiPhase::elect());
+			assert_ok!(MultiPhase::elect(SINGLE_PAGE));
 
 			assert_eq!(
 				multi_phase_events(),
@@ -2433,7 +2455,7 @@ mod tests {
 			));
 			assert!(QueuedSolution::<Runtime>::get().is_some());
 
-			assert_ok!(MultiPhase::elect());
+			assert_ok!(MultiPhase::elect(SINGLE_PAGE));
 
 			assert_eq!(
 				multi_phase_events(),
@@ -2467,6 +2489,35 @@ mod tests {
 		})
 	}
 
+	#[test]
+	fn try_elect_multi_page_fails() {
+		let prepare_election = || {
+			roll_to_signed();
+			assert!(Snapshot::<Runtime>::get().is_some());
+
+			// submit solution and assert it is queued and ready for elect to be called.
+			let (solution, _, _) = MultiPhase::mine_solution().unwrap();
+			assert_ok!(MultiPhase::submit(
+				crate::mock::RuntimeOrigin::signed(99),
+				Box::new(solution),
+			));
+			roll_to(30);
+			assert!(QueuedSolution::<Runtime>::get().is_some());
+		};
+
+		ExtBuilder::default().onchain_fallback(false).build_and_execute(|| {
+			prepare_election();
+			// single page elect call works as expected.
+			assert_ok!(MultiPhase::elect(SINGLE_PAGE));
+		});
+
+		ExtBuilder::default().onchain_fallback(false).build_and_execute(|| {
+			prepare_election();
+			// multi page calls will fail with multipage not supported error.
+			assert_noop!(MultiPhase::elect(SINGLE_PAGE + 1), ElectionError::MultiPageNotSupported);
+		})
+	}
+
 	#[test]
 	fn fallback_strategy_works() {
 		ExtBuilder::default().onchain_fallback(true).build_and_execute(|| {
@@ -2475,15 +2526,16 @@ mod tests {
 
 			// Zilch solutions thus far, but we get a result.
 			assert!(QueuedSolution::<Runtime>::get().is_none());
-			let supports = MultiPhase::elect().unwrap();
+			let supports = MultiPhase::elect(SINGLE_PAGE).unwrap();
 
-			assert_eq!(
-				supports,
-				vec![
-					(30, Support { total: 40, voters: vec![(2, 5), (4, 5), (30, 30)] }),
-					(40, Support { total: 60, voters: vec![(2, 5), (3, 10), (4, 5), (40, 40)] })
-				]
-			);
+			let expected_supports = vec![
+				(30, Support { total: 40, voters: vec![(2, 5), (4, 5), (30, 30)] }),
+				(40, Support { total: 60, voters: vec![(2, 5), (3, 10), (4, 5), (40, 40)] }),
+			]
+			.try_into()
+			.unwrap();
+
+			assert_eq!(supports, expected_supports);
 
 			assert_eq!(
 				multi_phase_events(),
@@ -2517,7 +2569,10 @@ mod tests {
 
 			// Zilch solutions thus far.
 			assert!(QueuedSolution::<Runtime>::get().is_none());
-			assert_eq!(MultiPhase::elect().unwrap_err(), ElectionError::Fallback("NoFallback."));
+			assert_eq!(
+				MultiPhase::elect(SINGLE_PAGE).unwrap_err(),
+				ElectionError::Fallback("NoFallback.")
+			);
 			// phase is now emergency.
 			assert_eq!(CurrentPhase::<Runtime>::get(), Phase::Emergency);
 			// snapshot is still there until election finalizes.
@@ -2551,7 +2606,10 @@ mod tests {
 
 			// Zilch solutions thus far.
 			assert!(QueuedSolution::<Runtime>::get().is_none());
-			assert_eq!(MultiPhase::elect().unwrap_err(), ElectionError::Fallback("NoFallback."));
+			assert_eq!(
+				MultiPhase::elect(SINGLE_PAGE).unwrap_err(),
+				ElectionError::Fallback("NoFallback.")
+			);
 
 			// phase is now emergency.
 			assert_eq!(CurrentPhase::<Runtime>::get(), Phase::Emergency);
@@ -2560,16 +2618,16 @@ mod tests {
 
 			// no single account can trigger this
 			assert_noop!(
-				MultiPhase::governance_fallback(RuntimeOrigin::signed(99), None, None),
+				MultiPhase::governance_fallback(RuntimeOrigin::signed(99)),
 				DispatchError::BadOrigin
 			);
 
 			// only root can
-			assert_ok!(MultiPhase::governance_fallback(RuntimeOrigin::root(), None, None));
+			assert_ok!(MultiPhase::governance_fallback(RuntimeOrigin::root()));
 			// something is queued now
 			assert!(QueuedSolution::<Runtime>::get().is_some());
 			// next election call with fix everything.;
-			assert!(MultiPhase::elect().is_ok());
+			assert!(MultiPhase::elect(SINGLE_PAGE).is_ok());
 			assert_eq!(CurrentPhase::<Runtime>::get(), Phase::Off);
 
 			assert_eq!(
@@ -2620,22 +2678,17 @@ mod tests {
 			roll_to(25);
 			assert_eq!(CurrentPhase::<Runtime>::get(), Phase::Off);
 
-			// On-chain backup works though.
-			let supports = MultiPhase::elect().unwrap();
-			assert!(supports.len() > 0);
+			// On-chain backup will fail similarly.
+			assert_eq!(
+				MultiPhase::elect(SINGLE_PAGE).unwrap_err(),
+				ElectionError::<Runtime>::Feasibility(FeasibilityError::SnapshotUnavailable)
+			);
 
 			assert_eq!(
 				multi_phase_events(),
 				vec![
-					Event::ElectionFinalized {
-						compute: ElectionCompute::Fallback,
-						score: ElectionScore {
-							minimal_stake: 0,
-							sum_stake: 0,
-							sum_stake_squared: 0
-						}
-					},
-					Event::PhaseTransitioned { from: Phase::Off, to: Phase::Off, round: 2 },
+					Event::ElectionFailed,
+					Event::PhaseTransitioned { from: Phase::Off, to: Phase::Emergency, round: 1 },
 				]
 			);
 		});
@@ -2660,7 +2713,7 @@ mod tests {
 			assert_eq!(CurrentPhase::<Runtime>::get(), Phase::Off);
 
 			roll_to(29);
-			let err = MultiPhase::elect().unwrap_err();
+			let err = MultiPhase::elect(SINGLE_PAGE).unwrap_err();
 			assert_eq!(err, ElectionError::Fallback("NoFallback."));
 			assert_eq!(CurrentPhase::<Runtime>::get(), Phase::Emergency);
 
diff --git a/substrate/frame/election-provider-multi-phase/src/mock.rs b/substrate/frame/election-provider-multi-phase/src/mock.rs
index d0797e100fc..d244af0b403 100644
--- a/substrate/frame/election-provider-multi-phase/src/mock.rs
+++ b/substrate/frame/election-provider-multi-phase/src/mock.rs
@@ -18,7 +18,7 @@
 use super::*;
 use crate::{self as multi_phase, signed::GeometricDepositBase, unsigned::MinerConfig};
 use frame_election_provider_support::{
-	bounds::{DataProviderBounds, ElectionBounds},
+	bounds::{DataProviderBounds, ElectionBounds, ElectionBoundsBuilder},
 	data_provider, onchain, ElectionDataProvider, NposSolution, SequentialPhragmen,
 };
 pub use frame_support::derive_impl;
@@ -35,7 +35,7 @@ use sp_core::{
 		testing::{PoolState, TestOffchainExt, TestTransactionPoolExt},
 		OffchainDbExt, OffchainWorkerExt, TransactionPoolExt,
 	},
-	H256,
+	ConstBool, H256,
 };
 use sp_npos_elections::{
 	assignment_ratio_to_staked_normalized, seq_phragmen, to_supports, BalancingConfig,
@@ -116,7 +116,7 @@ pub fn roll_to_round(n: u32) {
 
 	while Round::<Runtime>::get() != n {
 		roll_to_signed();
-		frame_support::assert_ok!(MultiPhase::elect());
+		frame_support::assert_ok!(MultiPhase::elect(Zero::zero()));
 	}
 }
 
@@ -296,6 +296,8 @@ parameter_types! {
 
 	#[derive(Debug)]
 	pub static MaxWinners: u32 = 200;
+	#[derive(Debug)]
+	pub static MaxBackersPerWinner: u32 = 200;
 	// `ElectionBounds` and `OnChainElectionsBounds` are defined separately to set them independently in the tests.
 	pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build();
 	pub static OnChainElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build();
@@ -309,34 +311,52 @@ impl onchain::Config for OnChainSeqPhragmen {
 	type Solver = SequentialPhragmen<AccountId, SolutionAccuracyOf<Runtime>, Balancing>;
 	type DataProvider = StakingMock;
 	type WeightInfo = ();
-	type MaxWinners = MaxWinners;
+	type MaxWinnersPerPage = MaxWinners;
+	type MaxBackersPerWinner = MaxBackersPerWinner;
+	type Sort = ConstBool<true>;
 	type Bounds = OnChainElectionsBounds;
 }
 
 pub struct MockFallback;
-impl ElectionProviderBase for MockFallback {
-	type BlockNumber = BlockNumber;
+impl ElectionProvider for MockFallback {
 	type AccountId = AccountId;
+	type BlockNumber = BlockNumber;
 	type Error = &'static str;
+	type MaxWinnersPerPage = MaxWinners;
+	type MaxBackersPerWinner = MaxBackersPerWinner;
+	type Pages = ConstU32<1>;
 	type DataProvider = StakingMock;
-	type MaxWinners = MaxWinners;
+
+	fn elect(_remaining: PageIndex) -> Result<BoundedSupportsOf<Self>, Self::Error> {
+		unimplemented!()
+	}
+
+	fn ongoing() -> bool {
+		false
+	}
 }
 
 impl InstantElectionProvider for MockFallback {
 	fn instant_elect(
-		voters_bounds: DataProviderBounds,
-		targets_bounds: DataProviderBounds,
+		voters: Vec<VoterOf<Runtime>>,
+		targets: Vec<AccountId>,
+		desired_targets: u32,
 	) -> Result<BoundedSupportsOf<Self>, Self::Error> {
 		if OnChainFallback::get() {
 			onchain::OnChainExecution::<OnChainSeqPhragmen>::instant_elect(
-				voters_bounds,
-				targets_bounds,
+				voters,
+				targets,
+				desired_targets,
 			)
 			.map_err(|_| "onchain::OnChainExecution failed.")
 		} else {
 			Err("NoFallback.")
 		}
 	}
+
+	fn bother() -> bool {
+		OnChainFallback::get()
+	}
 }
 
 parameter_types! {
@@ -362,6 +382,7 @@ impl MinerConfig for Runtime {
 	type MaxWeight = MinerMaxWeight;
 	type MaxVotesPerVoter = <StakingMock as ElectionDataProvider>::MaxVotesPerVoter;
 	type MaxWinners = MaxWinners;
+	type MaxBackersPerWinner = MaxBackersPerWinner;
 	type Solution = TestNposSolution;
 
 	fn solution_weight(v: u32, t: u32, a: u32, d: u32) -> Weight {
@@ -404,6 +425,7 @@ impl crate::Config for Runtime {
 		frame_election_provider_support::onchain::OnChainExecution<OnChainSeqPhragmen>;
 	type ForceOrigin = frame_system::EnsureRoot<AccountId>;
 	type MaxWinners = MaxWinners;
+	type MaxBackersPerWinner = MaxBackersPerWinner;
 	type MinerConfig = Self;
 	type Solver = SequentialPhragmen<AccountId, SolutionAccuracyOf<Runtime>, Balancing>;
 	type ElectionBounds = ElectionsBounds;
@@ -455,7 +477,12 @@ impl ElectionDataProvider for StakingMock {
 	type AccountId = AccountId;
 	type MaxVotesPerVoter = MaxNominations;
 
-	fn electable_targets(bounds: DataProviderBounds) -> data_provider::Result<Vec<AccountId>> {
+	fn electable_targets(
+		bounds: DataProviderBounds,
+		remaining_pages: PageIndex,
+	) -> data_provider::Result<Vec<AccountId>> {
+		assert!(remaining_pages.is_zero());
+
 		let targets = Targets::get();
 
 		if !DataProviderAllowBadData::get() &&
@@ -467,7 +494,12 @@ impl ElectionDataProvider for StakingMock {
 		Ok(targets)
 	}
 
-	fn electing_voters(bounds: DataProviderBounds) -> data_provider::Result<Vec<VoterOf<Runtime>>> {
+	fn electing_voters(
+		bounds: DataProviderBounds,
+		remaining_pages: PageIndex,
+	) -> data_provider::Result<Vec<VoterOf<Runtime>>> {
+		assert!(remaining_pages.is_zero());
+
 		let mut voters = Voters::get();
 
 		if !DataProviderAllowBadData::get() {
@@ -582,6 +614,10 @@ impl ExtBuilder {
 		<SignedMaxWeight>::set(weight);
 		self
 	}
+	pub fn max_backers_per_winner(self, max: u32) -> Self {
+		MaxBackersPerWinner::set(max);
+		self
+	}
 	pub fn build(self) -> sp_io::TestExternalities {
 		sp_tracing::try_init_simple();
 		let mut storage =
diff --git a/substrate/frame/election-provider-multi-phase/src/signed.rs b/substrate/frame/election-provider-multi-phase/src/signed.rs
index c685791bbdd..5efe848c0e6 100644
--- a/substrate/frame/election-provider-multi-phase/src/signed.rs
+++ b/substrate/frame/election-provider-multi-phase/src/signed.rs
@@ -21,7 +21,7 @@ use core::marker::PhantomData;
 
 use crate::{
 	unsigned::MinerConfig, Config, ElectionCompute, Pallet, QueuedSolution, RawSolution,
-	ReadySolution, SignedSubmissionIndices, SignedSubmissionNextIndex, SignedSubmissionsMap,
+	ReadySolutionOf, SignedSubmissionIndices, SignedSubmissionNextIndex, SignedSubmissionsMap,
 	SnapshotMetadata, SolutionOf, SolutionOrSnapshotSize, Weight, WeightInfo,
 };
 use alloc::{
@@ -490,7 +490,7 @@ impl<T: Config> Pallet<T> {
 	///
 	/// Infallible
 	pub fn finalize_signed_phase_accept_solution(
-		ready_solution: ReadySolution<T::AccountId, T::MaxWinners>,
+		ready_solution: ReadySolutionOf<T::MinerConfig>,
 		who: &T::AccountId,
 		deposit: BalanceOf<T>,
 		call_fee: BalanceOf<T>,
@@ -566,9 +566,9 @@ impl<T: Config> Pallet<T> {
 mod tests {
 	use super::*;
 	use crate::{
-		mock::*, CurrentPhase, ElectionBoundsBuilder, ElectionCompute, ElectionError, Error, Event,
-		Perbill, Phase, Round,
+		mock::*, CurrentPhase, ElectionCompute, ElectionError, Error, Event, Perbill, Phase, Round,
 	};
+	use frame_election_provider_support::bounds::ElectionBoundsBuilder;
 	use frame_support::{assert_noop, assert_ok, assert_storage_noop};
 	use sp_runtime::Percent;
 
diff --git a/substrate/frame/election-provider-multi-phase/src/unsigned.rs b/substrate/frame/election-provider-multi-phase/src/unsigned.rs
index 191131ed3ac..5aabc3454d4 100644
--- a/substrate/frame/election-provider-multi-phase/src/unsigned.rs
+++ b/substrate/frame/election-provider-multi-phase/src/unsigned.rs
@@ -19,8 +19,8 @@
 
 use crate::{
 	helpers, Call, Config, CurrentPhase, DesiredTargets, ElectionCompute, Error, FeasibilityError,
-	Pallet, QueuedSolution, RawSolution, ReadySolution, Round, RoundSnapshot, Snapshot,
-	SolutionAccuracyOf, SolutionOf, SolutionOrSnapshotSize, Weight,
+	Pallet, QueuedSolution, RawSolution, ReadySolution, ReadySolutionOf, Round, RoundSnapshot,
+	Snapshot, SolutionAccuracyOf, SolutionOf, SolutionOrSnapshotSize, Weight,
 };
 use alloc::{boxed::Box, vec::Vec};
 use codec::Encode;
@@ -98,6 +98,8 @@ pub enum MinerError {
 	NoMoreVoters,
 	/// An error from the solver.
 	Solver,
+	/// Desired targets are more than the maximum allowed winners.
+	TooManyDesiredTargets,
 }
 
 impl From<sp_npos_elections::Error> for MinerError {
@@ -112,16 +114,20 @@ impl From<FeasibilityError> for MinerError {
 	}
 }
 
-/// Reports the trimming result of a mined solution
+/// Reports the trimming result of a mined solution.
 #[derive(Debug, Clone)]
 pub struct TrimmingStatus {
+	/// Number of voters trimmed due to the solution weight limits.
 	weight: usize,
+	/// Number of voters trimmed due to the solution length limits.
 	length: usize,
+	/// Number of edges (voter -> target) trimmed due to the max backers per winner bound.
+	edges: usize,
 }
 
 impl TrimmingStatus {
 	pub fn is_trimmed(&self) -> bool {
-		self.weight > 0 || self.length > 0
+		self.weight > 0 || self.length > 0 || self.edges > 0
 	}
 
 	pub fn trimmed_weight(&self) -> usize {
@@ -131,6 +137,10 @@ impl TrimmingStatus {
 	pub fn trimmed_length(&self) -> usize {
 		self.length
 	}
+
+	pub fn trimmed_edges(&self) -> usize {
+		self.edges
+	}
 }
 
 /// Save a given call into OCW storage.
@@ -194,6 +204,7 @@ impl<T: Config + CreateInherent<Call<T>>> Pallet<T> {
 		let RoundSnapshot { voters, targets } =
 			Snapshot::<T>::get().ok_or(MinerError::SnapshotUnAvailable)?;
 		let desired_targets = DesiredTargets::<T>::get().ok_or(MinerError::SnapshotUnAvailable)?;
+		ensure!(desired_targets <= T::MaxWinners::get(), MinerError::TooManyDesiredTargets);
 		let (solution, score, size, is_trimmed) =
 			Miner::<T::MinerConfig>::mine_solution_with_snapshot::<T::Solver>(
 				voters,
@@ -262,16 +273,17 @@ impl<T: Config + CreateInherent<Call<T>>> Pallet<T> {
 	/// Mine a new solution as a call. Performs all checks.
 	pub fn mine_checked_call() -> Result<Call<T>, MinerError> {
 		// get the solution, with a load of checks to ensure if submitted, IT IS ABSOLUTELY VALID.
-		let (raw_solution, witness, _) = Self::mine_and_check()?;
+		let (raw_solution, witness, _trimming) = Self::mine_and_check()?;
 
 		let score = raw_solution.score;
 		let call: Call<T> = Call::submit_unsigned { raw_solution: Box::new(raw_solution), witness };
 
 		log!(
 			debug,
-			"mined a solution with score {:?} and size {}",
+			"mined a solution with score {:?} and size {} and trimming {:?}",
 			score,
-			call.using_encoded(|b| b.len())
+			call.using_encoded(|b| b.len()),
+			_trimming
 		);
 
 		Ok(call)
@@ -393,7 +405,7 @@ impl<T: Config + CreateInherent<Call<T>>> Pallet<T> {
 		// ensure score is being improved. Panic henceforth.
 		ensure!(
 			QueuedSolution::<T>::get()
-				.map_or(true, |q: ReadySolution<_, _>| raw_solution.score > q.score),
+				.map_or(true, |q: ReadySolution<_, _, _>| raw_solution.score > q.score),
 			Error::<T>::PreDispatchWeakSubmission,
 		);
 
@@ -427,8 +439,11 @@ pub trait MinerConfig {
 	///
 	/// The weight is computed using `solution_weight`.
 	type MaxWeight: Get<Weight>;
-	/// The maximum number of winners that can be elected.
+	/// The maximum number of winners that can be elected in the single page supported by this
+	/// pallet.
 	type MaxWinners: Get<u32>;
+	/// The maximum number of backers per winner in the last solution.
+	type MaxBackersPerWinner: Get<u32>;
 	/// Something that can compute the weight of a solution.
 	///
 	/// This weight estimate is then used to trim the solution, based on [`MinerConfig::MaxWeight`].
@@ -490,7 +505,11 @@ impl<T: MinerConfig> Miner<T> {
 
 		let ElectionResult { assignments, winners: _ } = election_result;
 
-		// Reduce (requires round-trip to staked form)
+		// keeps track of how many edges were trimmed out.
+		let mut edges_trimmed = 0;
+
+		// Reduce (requires round-trip to staked form) and ensures the max backer per winner bound
+		// requirements are met.
 		let sorted_assignments = {
 			// convert to staked and reduce.
 			let mut staked = assignment_ratio_to_staked_normalized(assignments, &stake_of)?;
@@ -517,6 +536,53 @@ impl<T: MinerConfig> Miner<T> {
 				},
 			);
 
+			// ensures that the max backers per winner bounds are respected given the supports
+			// generated from the assignments. We achieve that by removing edges (voter ->
+			// target) in the assignments with lower stake until the total number of backers per
+			// winner fits within the expected bounded supports. This should be performed *after*
+			// applying reduce over the assignments to avoid over-trimming.
+			//
+			// a potential trimming does not affect the desired targets of the solution as the
+			// targets have *too many* edges by definition if trimmed.
+			let max_backers_per_winner = T::MaxBackersPerWinner::get().saturated_into::<usize>();
+
+			let _ = sp_npos_elections::to_supports(&staked)
+				.iter_mut()
+				.filter(|(_, support)| support.voters.len() > max_backers_per_winner)
+				.for_each(|(target, ref mut support)| {
+					// first sort by support stake, lowest at the tail.
+					support.voters.sort_by(|a, b| b.1.cmp(&a.1));
+
+					// filter out lowest stake edge in this support.
+					// optimization note: collects edge voters to remove from assignments into a
+					// btree set to optimize the search in the next loop.
+					let filtered: alloc::collections::BTreeSet<_> = support
+						.voters
+						.split_off(max_backers_per_winner)
+						.into_iter()
+						.map(|(who, _stake)| who)
+						.collect();
+
+					// remove lowest stake edges calculated above from assignments.
+					staked.iter_mut().for_each(|assignment| {
+						if filtered.contains(&assignment.who) {
+							assignment.distribution.retain(|(t, _)| t != target);
+						}
+					});
+
+					edges_trimmed += filtered.len();
+				});
+
+			debug_assert!({
+				// at this point we expect the supports generated from the assignments to fit within
+				// the expected bounded supports.
+				let expected_ok: Result<
+					crate::BoundedSupports<_, T::MaxWinners, T::MaxBackersPerWinner>,
+					_,
+				> = sp_npos_elections::to_supports(&staked).try_into();
+				expected_ok.is_ok()
+			});
+
 			// convert back.
 			assignment_staked_to_ratio_normalized(staked)?
 		};
@@ -549,7 +615,8 @@ impl<T: MinerConfig> Miner<T> {
 		// re-calc score.
 		let score = solution.clone().score(stake_of, voter_at, target_at)?;
 
-		let is_trimmed = TrimmingStatus { weight: weight_trimmed, length: length_trimmed };
+		let is_trimmed =
+			TrimmingStatus { weight: weight_trimmed, length: length_trimmed, edges: edges_trimmed };
 
 		Ok((solution, score, size, is_trimmed))
 	}
@@ -618,7 +685,7 @@ impl<T: MinerConfig> Miner<T> {
 		let remove = assignments.len().saturating_sub(maximum_allowed_voters);
 
 		log_no_system!(
-			debug,
+			trace,
 			"from {} assignments, truncating to {} for length, removing {}",
 			assignments.len(),
 			maximum_allowed_voters,
@@ -747,7 +814,7 @@ impl<T: MinerConfig> Miner<T> {
 		snapshot: RoundSnapshot<T::AccountId, MinerVoterOf<T>>,
 		current_round: u32,
 		minimum_untrusted_score: Option<ElectionScore>,
-	) -> Result<ReadySolution<T::AccountId, T::MaxWinners>, FeasibilityError> {
+	) -> Result<ReadySolutionOf<T>, FeasibilityError> {
 		let RawSolution { solution, score, round } = raw_solution;
 		let RoundSnapshot { voters: snapshot_voters, targets: snapshot_targets } = snapshot;
 
@@ -814,9 +881,12 @@ impl<T: MinerConfig> Miner<T> {
 
 		// Finally, check that the claimed score was indeed correct.
 		let known_score = supports.evaluate();
+
 		ensure!(known_score == score, FeasibilityError::InvalidScore);
 
-		// Size of winners in miner solution is equal to `desired_targets` <= `MaxWinners`.
+		// Size of winners in miner solution is equal to `desired_targets` <= `MaxWinners`. In
+		// addition, the miner should have ensured that the MaxBackersPerWinner bound is respected,
+		// thus this conversion should not fail.
 		let supports = supports
 			.try_into()
 			.defensive_map_err(|_| FeasibilityError::BoundedConversionFailed)?;
@@ -1862,6 +1932,193 @@ mod tests {
 		})
 	}
 
+	#[test]
+	fn mine_solution_always_respects_max_backers_per_winner() {
+		use crate::mock::MaxBackersPerWinner;
+		use frame_election_provider_support::BoundedSupport;
+
+		let targets = vec![10, 20, 30, 40];
+		let voters = vec![
+			(1, 11, bounded_vec![10, 20, 30]),
+			(2, 12, bounded_vec![10, 20, 30]),
+			(3, 13, bounded_vec![10, 20, 30]),
+			(4, 14, bounded_vec![10, 20, 30]),
+			(5, 15, bounded_vec![10, 20, 40]),
+		];
+		let snapshot = RoundSnapshot { voters: voters.clone(), targets: targets.clone() };
+		let (round, desired_targets) = (1, 3);
+
+		// election with unbounded max backers per winner.
+		ExtBuilder::default().max_backers_per_winner(u32::MAX).build_and_execute(|| {
+			assert_eq!(MaxBackersPerWinner::get(), u32::MAX);
+
+			let (solution, expected_score_unbounded, _, trimming_status) =
+				Miner::<Runtime>::mine_solution_with_snapshot::<<Runtime as Config>::Solver>(
+					voters.clone(),
+					targets.clone(),
+					desired_targets,
+				)
+				.unwrap();
+
+			let ready_solution = Miner::<Runtime>::feasibility_check(
+				RawSolution { solution, score: expected_score_unbounded, round },
+				Default::default(),
+				desired_targets,
+				snapshot.clone(),
+				round,
+				Default::default(),
+			)
+			.unwrap();
+
+			assert_eq!(
+				ready_solution.supports.into_iter().collect::<Vec<_>>(),
+				vec![
+					(
+						10,
+						BoundedSupport { total: 25, voters: bounded_vec![(1, 11), (5, 5), (4, 9)] }
+					),
+					(20, BoundedSupport { total: 22, voters: bounded_vec![(2, 12), (5, 10)] }),
+					(30, BoundedSupport { total: 18, voters: bounded_vec![(3, 13), (4, 5)] })
+				]
+			);
+
+			// no trimmed edges.
+			assert_eq!(trimming_status.trimmed_edges(), 0);
+		});
+
+		// election with max 1 backer per winner.
+		ExtBuilder::default().max_backers_per_winner(1).build_and_execute(|| {
+			assert_eq!(MaxBackersPerWinner::get(), 1);
+
+			let (solution, expected_score_bounded, _, trimming_status) =
+				Miner::<Runtime>::mine_solution_with_snapshot::<<Runtime as Config>::Solver>(
+					voters,
+					targets,
+					desired_targets,
+				)
+				.unwrap();
+
+			let ready_solution = Miner::<Runtime>::feasibility_check(
+				RawSolution { solution, score: expected_score_bounded, round },
+				Default::default(),
+				desired_targets,
+				snapshot,
+				round,
+				Default::default(),
+			)
+			.unwrap();
+
+			for (_, supports) in ready_solution.supports.iter() {
+				assert!((supports.voters.len() as u32) <= MaxBackersPerWinner::get());
+			}
+
+			assert_eq!(
+				ready_solution.supports.into_iter().collect::<Vec<_>>(),
+				vec![
+					(10, BoundedSupport { total: 11, voters: bounded_vec![(1, 11)] }),
+					(20, BoundedSupport { total: 12, voters: bounded_vec![(2, 12)] }),
+					(30, BoundedSupport { total: 13, voters: bounded_vec![(3, 13)] })
+				]
+			);
+
+			// four trimmed edges.
+			assert_eq!(trimming_status.trimmed_edges(), 4);
+		});
+	}
+
+	#[test]
+	fn max_backers_edges_trims_lowest_stake() {
+		use crate::mock::MaxBackersPerWinner;
+
+		ExtBuilder::default().build_and_execute(|| {
+			let targets = vec![10, 20, 30, 40];
+
+			let voters = vec![
+				(1, 100, bounded_vec![10, 20]),
+				(2, 200, bounded_vec![10, 20, 30]),
+				(3, 300, bounded_vec![10, 30]),
+				(4, 400, bounded_vec![10, 30]),
+				(5, 500, bounded_vec![10, 20, 30]),
+				(6, 600, bounded_vec![10, 20, 30, 40]),
+			];
+			let snapshot = RoundSnapshot { voters: voters.clone(), targets: targets.clone() };
+			let (round, desired_targets) = (1, 4);
+
+			let max_backers_bound = u32::MAX;
+			let trim_backers_bound = 2;
+
+			// election with unbounded max backers per winner.
+			MaxBackersPerWinner::set(max_backers_bound);
+			let (solution, score, _, trimming_status) =
+				Miner::<Runtime>::mine_solution_with_snapshot::<<Runtime as Config>::Solver>(
+					voters.clone(),
+					targets.clone(),
+					desired_targets,
+				)
+				.unwrap();
+
+			assert_eq!(trimming_status.trimmed_edges(), 0);
+
+			let ready_solution = Miner::<Runtime>::feasibility_check(
+				RawSolution { solution, score, round },
+				Default::default(),
+				desired_targets,
+				snapshot.clone(),
+				round,
+				Default::default(),
+			)
+			.unwrap();
+
+			let full_supports = ready_solution.supports.into_iter().collect::<Vec<_>>();
+
+			// gather the expected trimmed supports (lowest stake from supports with more backers
+			// than expected when MaxBackersPerWinner is 2) from the full, unbounded supports.
+			let expected_trimmed_supports = full_supports
+				.into_iter()
+				.filter(|(_, s)| s.voters.len() as u32 > trim_backers_bound)
+				.map(|(t, s)| (t, s.voters.into_iter().min_by(|a, b| a.1.cmp(&b.1)).unwrap()))
+				.collect::<Vec<_>>();
+
+			// election with bounded 2 max backers per winner.
+			MaxBackersPerWinner::set(trim_backers_bound);
+			let (solution, score, _, trimming_status) =
+				Miner::<Runtime>::mine_solution_with_snapshot::<<Runtime as Config>::Solver>(
+					voters.clone(),
+					targets.clone(),
+					desired_targets,
+				)
+				.unwrap();
+
+			assert_eq!(trimming_status.trimmed_edges(), 2);
+
+			let ready_solution = Miner::<Runtime>::feasibility_check(
+				RawSolution { solution, score, round },
+				Default::default(),
+				desired_targets,
+				snapshot.clone(),
+				round,
+				Default::default(),
+			)
+			.unwrap();
+
+			let trimmed_supports = ready_solution.supports.into_iter().collect::<Vec<_>>();
+
+			// gather all trimmed_supports edges from the trimmed solution.
+			let mut trimmed_supports_edges_full = vec![];
+			for (t, s) in trimmed_supports {
+				for v in s.voters {
+					trimmed_supports_edges_full.push((t, v));
+				}
+			}
+
+			// expected trimmed supports set should be disjoint to the trimmed_supports full set of
+			// edges.
+			for edge in trimmed_supports_edges_full {
+				assert!(!expected_trimmed_supports.contains(&edge));
+			}
+		})
+	}
+
 	#[test]
 	fn trim_assignments_length_does_not_modify_when_short_enough() {
 		ExtBuilder::default().build_and_execute(|| {
diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs
index 8118dfa2045..8c8de865600 100644
--- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs
+++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs
@@ -24,7 +24,7 @@ use frame_support::{
 	PalletId,
 };
 use frame_system::EnsureRoot;
-use sp_core::{ConstU32, Get};
+use sp_core::{ConstBool, ConstU32, Get};
 use sp_npos_elections::{ElectionScore, VoteWeight};
 use sp_runtime::{
 	offchain::{
@@ -172,6 +172,8 @@ parameter_types! {
 	pub static TransactionPriority: transaction_validity::TransactionPriority = 1;
 	#[derive(Debug)]
 	pub static MaxWinners: u32 = 100;
+	#[derive(Debug)]
+	pub static MaxBackersPerWinner: u32 = 100;
 	pub static MaxVotesPerVoter: u32 = 16;
 	pub static SignedFixedDeposit: Balance = 1;
 	pub static SignedDepositIncreaseFactor: Percent = Percent::from_percent(10);
@@ -200,12 +202,18 @@ impl pallet_election_provider_multi_phase::Config for Runtime {
 	type SlashHandler = ();
 	type RewardHandler = ();
 	type DataProvider = Staking;
-	type Fallback =
-		frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, MaxWinners)>;
+	type Fallback = frame_election_provider_support::NoElection<(
+		AccountId,
+		BlockNumber,
+		Staking,
+		MaxWinners,
+		MaxBackersPerWinner,
+	)>;
 	type GovernanceFallback = onchain::OnChainExecution<OnChainSeqPhragmen>;
 	type Solver = SequentialPhragmen<AccountId, SolutionAccuracyOf<Runtime>, ()>;
 	type ForceOrigin = EnsureRoot<AccountId>;
 	type MaxWinners = MaxWinners;
+	type MaxBackersPerWinner = MaxBackersPerWinner;
 	type ElectionBounds = ElectionBounds;
 	type BenchmarkingConfig = NoopElectionProviderBenchmarkConfig;
 	type WeightInfo = ();
@@ -219,6 +227,7 @@ impl MinerConfig for Runtime {
 	type MaxLength = MinerMaxLength;
 	type MaxWeight = MinerMaxWeight;
 	type MaxWinners = MaxWinners;
+	type MaxBackersPerWinner = MaxBackersPerWinner;
 
 	fn solution_weight(_v: u32, _t: u32, _a: u32, _d: u32) -> Weight {
 		Weight::zero()
@@ -356,6 +365,9 @@ parameter_types! {
 }
 
 impl onchain::Config for OnChainSeqPhragmen {
+	type MaxWinnersPerPage = MaxWinners;
+	type MaxBackersPerWinner = MaxBackersPerWinner;
+	type Sort = ConstBool<true>;
 	type System = Runtime;
 	type Solver = SequentialPhragmen<
 		AccountId,
@@ -363,7 +375,6 @@ impl onchain::Config for OnChainSeqPhragmen {
 	>;
 	type DataProvider = Staking;
 	type WeightInfo = ();
-	type MaxWinners = MaxWinners;
 	type Bounds = ElectionBounds;
 }
 
diff --git a/substrate/frame/election-provider-support/Cargo.toml b/substrate/frame/election-provider-support/Cargo.toml
index 32fa381e1d2..aadf87edb0e 100644
--- a/substrate/frame/election-provider-support/Cargo.toml
+++ b/substrate/frame/election-provider-support/Cargo.toml
@@ -24,6 +24,8 @@ sp-arithmetic = { workspace = true }
 sp-core = { workspace = true }
 sp-npos-elections = { workspace = true }
 sp-runtime = { workspace = true }
+sp-std = { workspace = true }
+
 
 [dev-dependencies]
 rand = { features = ["small_rng"], workspace = true, default-features = true }
@@ -43,6 +45,7 @@ std = [
 	"sp-io/std",
 	"sp-npos-elections/std",
 	"sp-runtime/std",
+	"sp-std/std",
 ]
 runtime-benchmarks = [
 	"frame-support/runtime-benchmarks",
diff --git a/substrate/frame/election-provider-support/benchmarking/src/inner.rs b/substrate/frame/election-provider-support/benchmarking/src/inner.rs
index 7fb8c1bdb72..a7b969bb1cf 100644
--- a/substrate/frame/election-provider-support/benchmarking/src/inner.rs
+++ b/substrate/frame/election-provider-support/benchmarking/src/inner.rs
@@ -37,7 +37,7 @@ fn set_up_voters_targets<AccountId: Decode + Clone>(
 	voters_len: u32,
 	targets_len: u32,
 	degree: usize,
-) -> (Vec<(AccountId, u64, impl IntoIterator<Item = AccountId>)>, Vec<AccountId>) {
+) -> (Vec<(AccountId, u64, impl Clone + IntoIterator<Item = AccountId>)>, Vec<AccountId>) {
 	// fill targets.
 	let mut targets = (0..targets_len)
 		.map(|i| frame_benchmarking::account::<AccountId>("Target", i, SEED))
diff --git a/substrate/frame/election-provider-support/solution-type/fuzzer/src/compact.rs b/substrate/frame/election-provider-support/solution-type/fuzzer/src/compact.rs
index 90fd9509e6f..c4ae7c84623 100644
--- a/substrate/frame/election-provider-support/solution-type/fuzzer/src/compact.rs
+++ b/substrate/frame/election-provider-support/solution-type/fuzzer/src/compact.rs
@@ -21,7 +21,8 @@ use sp_arithmetic::Percent;
 use sp_runtime::codec::{Encode, Error};
 
 fn main() {
-	generate_solution_type!(#[compact] pub struct InnerTestSolutionCompact::<
+	generate_solution_type!(
+		#[compact] pub struct InnerTestSolutionCompact::<
 		VoterIndex = u32,
 		TargetIndex = u32,
 		Accuracy = Percent,
diff --git a/substrate/frame/election-provider-support/solution-type/src/codec.rs b/substrate/frame/election-provider-support/solution-type/src/codec.rs
index 16d5f17469b..c1dd62fe555 100644
--- a/substrate/frame/election-provider-support/solution-type/src/codec.rs
+++ b/substrate/frame/election-provider-support/solution-type/src/codec.rs
@@ -33,6 +33,7 @@ pub(crate) fn codec_and_info_impl(
 	let scale_info = scale_info_impl(&ident, &voter_type, &target_type, &weight_type, count);
 
 	quote! {
+		impl _fepsp::codec::EncodeLike for #ident {}
 		#encode
 		#decode
 		#scale_info
diff --git a/substrate/frame/election-provider-support/solution-type/src/single_page.rs b/substrate/frame/election-provider-support/solution-type/src/single_page.rs
index de59df162c8..f57dcb9694a 100644
--- a/substrate/frame/election-provider-support/solution-type/src/single_page.rs
+++ b/substrate/frame/election-provider-support/solution-type/src/single_page.rs
@@ -84,6 +84,8 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result<TokenStream2> {
 			Eq,
 			Clone,
 			Debug,
+			Ord,
+			PartialOrd,
 			_fepsp::codec::Encode,
 			_fepsp::codec::Decode,
 			_fepsp::scale_info::TypeInfo,
@@ -96,6 +98,8 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result<TokenStream2> {
 	let from_impl = from_impl(&struct_name, count);
 	let into_impl = into_impl(&assignment_name, count, weight_type.clone());
 	let from_index_impl = crate::index_assignment::from_impl(&struct_name, count);
+	let sort_impl = sort_impl(count);
+	let remove_weakest_sorted_impl = remove_weakest_sorted_impl(count);
 
 	Ok(quote! (
 		/// A struct to encode a election assignment in a compact way.
@@ -178,6 +182,29 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result<TokenStream2> {
 
 				all_targets.into_iter().collect()
 			}
+
+			fn sort<F>(&mut self, mut voter_stake: F)
+			where
+				F: FnMut(&Self::VoterIndex) -> _feps::VoteWeight
+			{
+				#sort_impl
+			}
+
+			fn remove_weakest_sorted<F>(&mut self, mut voter_stake: F) -> Option<Self::VoterIndex>
+			where
+				F: FnMut(&Self::VoterIndex) -> _feps::VoteWeight
+			{
+				#remove_weakest_sorted_impl
+			}
+
+			fn corrupt(&mut self) {
+				self.votes1.push(
+					(
+						_fepsp::sp_arithmetic::traits::Bounded::max_value(),
+						_fepsp::sp_arithmetic::traits::Bounded::max_value()
+					)
+				)
+			}
 		}
 
 		type __IndexAssignment = _feps::IndexAssignment<
@@ -185,11 +212,12 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result<TokenStream2> {
 			<#ident as _feps::NposSolution>::TargetIndex,
 			<#ident as _feps::NposSolution>::Accuracy,
 		>;
+
 		impl _fepsp::codec::MaxEncodedLen for #ident {
 			fn max_encoded_len() -> usize {
 				use frame_support::traits::Get;
 				use _fepsp::codec::Encode;
-				let s: u32 = #max_voters::get();
+				let s: u32 = <#max_voters as _feps::Get<u32>>::get();
 				let max_element_size =
 					// the first voter..
 					#voter_type::max_encoded_len()
@@ -206,6 +234,7 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result<TokenStream2> {
 					.saturating_add((s as usize).saturating_mul(max_element_size))
 			}
 		}
+
 		impl<'a> core::convert::TryFrom<&'a [__IndexAssignment]> for #ident {
 			type Error = _feps::Error;
 			fn try_from(index_assignments: &'a [__IndexAssignment]) -> Result<Self, Self::Error> {
@@ -227,6 +256,65 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result<TokenStream2> {
 	))
 }
 
+fn sort_impl(count: usize) -> TokenStream2 {
+	(1..=count)
+		.map(|c| {
+			let field = vote_field(c);
+			quote! {
+				// NOTE: self.field here is sometimes `Vec<(voter, weight)>` and sometimes
+				// `Vec<(voter, weights, last_weight)>`, but Rust's great pattern matching makes it
+				// all work super nicely.
+				self.#field.sort_by(|(a, ..), (b, ..)| voter_stake(&b).cmp(&voter_stake(&a)));
+				// ---------------------------------^^ in all fields, the index 0 is the voter id.
+			}
+		})
+		.collect::<TokenStream2>()
+}
+
+fn remove_weakest_sorted_impl(count: usize) -> TokenStream2 {
+	// check minimum from field 2 onwards. We assume field 1 holds the minimum.
+	let check_minimum = (2..=count).map(|c| {
+		let filed = vote_field(c);
+		quote! {
+			let filed_value = self.#filed
+				.last()
+				.map(|(x, ..)| voter_stake(x))
+				.unwrap_or_else(|| _fepsp::sp_arithmetic::traits::Bounded::max_value());
+			if filed_value < minimum {
+				minimum = filed_value;
+				minimum_filed = #c
+			}
+		}
+	});
+
+	let remove_minimum_match = (1..=count).map(|c| {
+		let filed = vote_field(c);
+		quote! {
+			#c => self.#filed.pop().map(|(x, ..)| x),
+		}
+	});
+
+	let first_filed = vote_field(1);
+	quote! {
+		// we assume first one is the minimum. No problem if it is empty.
+		let mut minimum_filed = 1;
+		let mut minimum = self.#first_filed
+			.last()
+			.map(|(x, ..)| voter_stake(x))
+			.unwrap_or_else(|| _fepsp::sp_arithmetic::traits::Bounded::max_value());
+
+		#( #check_minimum )*
+
+		match minimum_filed {
+			#( #remove_minimum_match )*
+			_ => {
+				debug_assert!(false);
+				None
+			}
+		}
+	}
+}
+
 fn remove_voter_impl(count: usize) -> TokenStream2 {
 	let field_name = vote_field(1);
 	let single = quote! {
diff --git a/substrate/frame/election-provider-support/src/bounds.rs b/substrate/frame/election-provider-support/src/bounds.rs
index 6b2423b7fec..6ef0604cb4b 100644
--- a/substrate/frame/election-provider-support/src/bounds.rs
+++ b/substrate/frame/election-provider-support/src/bounds.rs
@@ -54,6 +54,7 @@
 //! A default or `None` bound means that no bounds are enforced (i.e. unlimited result size). In
 //! general, be careful when using unbounded election bounds in production.
 
+use codec::Encode;
 use core::ops::Add;
 use sp_runtime::traits::Zero;
 
@@ -154,6 +155,15 @@ impl DataProviderBounds {
 			self.size_exhausted(given_size.unwrap_or(SizeBound::zero()))
 	}
 
+	/// Ensures the given encodable slice meets both the size and count bounds.
+	///
+	/// Same as `exhausted`, but with a more convenient syntax.
+	pub fn slice_exhausted<T: Encode>(self, input: &[T]) -> bool {
+		let size = Some((input.encoded_size() as u32).into());
+		let count = Some((input.len() as u32).into());
+		self.exhausted(size, count)
+	}
+
 	/// Returns an instance of `Self` that is constructed by capping both the `count` and `size`
 	/// fields. If `self` is None, overwrite it with the provided bounds.
 	pub fn max(self, bounds: DataProviderBounds) -> Self {
diff --git a/substrate/frame/election-provider-support/src/lib.rs b/substrate/frame/election-provider-support/src/lib.rs
index cb3249e388a..68aee2c82e6 100644
--- a/substrate/frame/election-provider-support/src/lib.rs
+++ b/substrate/frame/election-provider-support/src/lib.rs
@@ -21,10 +21,9 @@
 //! within FRAME pallets.
 //!
 //! Something that will provide the functionality of election will implement
-//! [`ElectionProvider`] and its parent-trait [`ElectionProviderBase`], whilst needing an
-//! associated [`ElectionProviderBase::DataProvider`], which needs to be
-//! fulfilled by an entity implementing [`ElectionDataProvider`]. Most often, *the data provider is*
-//! the receiver of the election, resulting in a diagram as below:
+//! [`ElectionProvider`], whilst needing an associated [`ElectionProvider::DataProvider`], which
+//! needs to be fulfilled by an entity implementing [`ElectionDataProvider`]. Most often, *the data
+//! provider is* the receiver of the election, resulting in a diagram as below:
 //!
 //! ```ignore
 //!                                         ElectionDataProvider
@@ -56,8 +55,15 @@
 //!
 //! To accommodate both type of elections in one trait, the traits lean toward **stateful
 //! election**, as it is more general than the stateless. This is why [`ElectionProvider::elect`]
-//! has no parameters. All value and type parameter must be provided by the [`ElectionDataProvider`]
-//! trait, even if the election happens immediately.
+//! does not receive election data as an input. All value and type parameter must be provided by the
+//! [`ElectionDataProvider`] trait, even if the election happens immediately.
+//!
+//! ## Multi-page election support
+//!
+//! Both [`ElectionDataProvider`] and [`ElectionProvider`] traits are parameterized by page,
+//! supporting an election to be performed over multiple pages. This enables the
+//! [`ElectionDataProvider`] implementor to provide all the election data over multiple pages.
+//! Similarly [`ElectionProvider::elect`] is parameterized by page index.
 //!
 //! ## Election Data
 //!
@@ -104,17 +110,17 @@
 //!     impl<T: Config> ElectionDataProvider for Pallet<T> {
 //!         type AccountId = AccountId;
 //!         type BlockNumber = BlockNumber;
-//!         type MaxVotesPerVoter = ConstU32<1>;
+//!         type MaxVotesPerVoter = ConstU32<100>;
 //!
 //!         fn desired_targets() -> data_provider::Result<u32> {
 //!             Ok(1)
 //!         }
-//!         fn electing_voters(bounds: DataProviderBounds)
+//!         fn electing_voters(bounds: DataProviderBounds, _page: PageIndex)
 //!           -> data_provider::Result<Vec<VoterOf<Self>>>
 //!         {
 //!             Ok(Default::default())
 //!         }
-//!         fn electable_targets(bounds: DataProviderBounds) -> data_provider::Result<Vec<AccountId>> {
+//!         fn electable_targets(bounds: DataProviderBounds, _page: PageIndex) -> data_provider::Result<Vec<AccountId>> {
 //!             Ok(vec![10, 20, 30])
 //!         }
 //!         fn next_election_prediction(now: BlockNumber) -> BlockNumber {
@@ -126,40 +132,54 @@
 //!
 //! mod generic_election_provider {
 //!     use super::*;
+//!     use sp_runtime::traits::Zero;
 //!
 //!     pub struct GenericElectionProvider<T: Config>(std::marker::PhantomData<T>);
 //!
 //!     pub trait Config {
 //!         type DataProvider: ElectionDataProvider<AccountId=AccountId, BlockNumber = BlockNumber>;
+//!         type MaxWinnersPerPage: Get<u32>;
+//!         type MaxBackersPerWinner: Get<u32>;
+//!         type Pages: Get<u32>;
 //!     }
 //!
-//!     impl<T: Config> ElectionProviderBase for GenericElectionProvider<T> {
+//!     impl<T: Config> ElectionProvider for GenericElectionProvider<T> {
 //!         type AccountId = AccountId;
 //!         type BlockNumber = BlockNumber;
 //!         type Error = &'static str;
+//!         type MaxBackersPerWinner = T::MaxBackersPerWinner;
+//!         type MaxWinnersPerPage = T::MaxWinnersPerPage;
+//!         type Pages = T::Pages;
 //!         type DataProvider = T::DataProvider;
-//!         type MaxWinners = ConstU32<{ u32::MAX }>;
 //!
-//!     }
+//!         fn elect(page: PageIndex) -> Result<BoundedSupportsOf<Self>, Self::Error> {
+//!             unimplemented!()
+//!         }
 //!
-//!     impl<T: Config> ElectionProvider for GenericElectionProvider<T> {
-//!         fn ongoing() -> bool { false }
-//!         fn elect() -> Result<BoundedSupportsOf<Self>, Self::Error> {
-//!             Self::DataProvider::electable_targets(DataProviderBounds::default())
-//!                 .map_err(|_| "failed to elect")
-//!                 .map(|t| bounded_vec![(t[0], Support::default())])
+//!         fn ongoing() -> bool {
+//!             unimplemented!()
 //!         }
 //!     }
 //! }
 //!
 //! mod runtime {
+//!     use frame_support::parameter_types;
 //!     use super::generic_election_provider;
 //!     use super::data_provider_mod;
 //!     use super::AccountId;
 //!
+//!     parameter_types! {
+//!         pub static MaxWinnersPerPage: u32 = 10;
+//!         pub static MaxBackersPerWinner: u32 = 20;
+//!         pub static Pages: u32 = 2;
+//!     }
+//!
 //!     struct Runtime;
 //!     impl generic_election_provider::Config for Runtime {
 //!         type DataProvider = data_provider_mod::Pallet<Runtime>;
+//!         type MaxWinnersPerPage = MaxWinnersPerPage;
+//!         type MaxBackersPerWinner = MaxBackersPerWinner;
+//!         type Pages = Pages;
 //!     }
 //!
 //!     impl data_provider_mod::Config for Runtime {
@@ -181,21 +201,24 @@ extern crate alloc;
 
 use alloc::{boxed::Box, vec::Vec};
 use core::fmt::Debug;
+use frame_support::traits::{Defensive, DefensiveResult};
+use sp_core::ConstU32;
 use sp_runtime::{
 	traits::{Bounded, Saturating, Zero},
 	RuntimeDebug,
 };
 
 pub use bounds::DataProviderBounds;
-pub use codec::{Decode, Encode};
+pub use codec::{Decode, Encode, MaxEncodedLen};
 /// Re-export the solution generation macro.
 pub use frame_election_provider_solution_type::generate_solution_type;
-pub use frame_support::{traits::Get, weights::Weight, BoundedVec};
+pub use frame_support::{traits::Get, weights::Weight, BoundedVec, DefaultNoBound};
+use scale_info::TypeInfo;
 /// Re-export some type as they are used in the interface.
 pub use sp_arithmetic::PerThing;
 pub use sp_npos_elections::{
-	Assignment, BalancingConfig, BoundedSupports, ElectionResult, Error, ExtendedBalance,
-	IdentifierT, PerThing128, Support, Supports, VoteWeight,
+	Assignment, BalancingConfig, ElectionResult, Error, ExtendedBalance, IdentifierT, PerThing128,
+	Support, Supports, VoteWeight,
 };
 pub use traits::NposSolution;
 
@@ -234,6 +257,9 @@ mod mock;
 #[cfg(test)]
 mod tests;
 
+/// A page index for the multi-block elections pagination.
+pub type PageIndex = u32;
+
 /// The [`IndexAssignment`] type is an intermediate between the assignments list
 /// ([`&[Assignment<T>]`][Assignment]) and `SolutionOf<T>`.
 ///
@@ -251,7 +277,9 @@ pub struct IndexAssignment<VoterIndex, TargetIndex, P: PerThing> {
 	pub distribution: Vec<(TargetIndex, P)>,
 }
 
-impl<VoterIndex, TargetIndex, P: PerThing> IndexAssignment<VoterIndex, TargetIndex, P> {
+impl<VoterIndex: core::fmt::Debug, TargetIndex: core::fmt::Debug, P: PerThing>
+	IndexAssignment<VoterIndex, TargetIndex, P>
+{
 	pub fn new<AccountId: IdentifierT>(
 		assignment: &Assignment<AccountId, P>,
 		voter_index: impl Fn(&AccountId) -> Option<VoterIndex>,
@@ -293,21 +321,43 @@ pub trait ElectionDataProvider {
 	/// Maximum number of votes per voter that this data provider is providing.
 	type MaxVotesPerVoter: Get<u32>;
 
-	/// All possible targets for the election, i.e. the targets that could become elected, thus
-	/// "electable".
+	/// Returns the possible targets for the election associated with the provided `page`, i.e. the
+	/// targets that could become elected, thus "electable".
 	///
 	/// This should be implemented as a self-weighing function. The implementor should register its
 	/// appropriate weight at the end of execution with the system pallet directly.
-	fn electable_targets(bounds: DataProviderBounds)
-		-> data_provider::Result<Vec<Self::AccountId>>;
+	fn electable_targets(
+		bounds: DataProviderBounds,
+		page: PageIndex,
+	) -> data_provider::Result<Vec<Self::AccountId>>;
 
-	/// All the voters that participate in the election, thus "electing".
+	/// A state-less version of [`Self::electable_targets`].
+	///
+	/// An election-provider that only uses 1 page should use this.
+	fn electable_targets_stateless(
+		bounds: DataProviderBounds,
+	) -> data_provider::Result<Vec<Self::AccountId>> {
+		Self::electable_targets(bounds, 0)
+	}
+
+	/// All the voters that participate in the election associated with page `page`, thus
+	/// "electing".
 	///
 	/// Note that if a notion of self-vote exists, it should be represented here.
 	///
 	/// This should be implemented as a self-weighing function. The implementor should register its
 	/// appropriate weight at the end of execution with the system pallet directly.
-	fn electing_voters(bounds: DataProviderBounds) -> data_provider::Result<Vec<VoterOf<Self>>>;
+	fn electing_voters(
+		bounds: DataProviderBounds,
+		page: PageIndex,
+	) -> data_provider::Result<Vec<VoterOf<Self>>>;
+
+	/// A state-less version of [`Self::electing_voters`].
+	fn electing_voters_stateless(
+		bounds: DataProviderBounds,
+	) -> data_provider::Result<Vec<VoterOf<Self>>> {
+		Self::electing_voters(bounds, 0)
+	}
 
 	/// The number of targets to elect.
 	///
@@ -339,6 +389,9 @@ pub trait ElectionDataProvider {
 	) {
 	}
 
+	#[cfg(any(feature = "runtime-benchmarks", test))]
+	fn set_next_election(_to: u32) {}
+
 	/// Utility function only to be used in benchmarking scenarios, to be implemented optionally,
 	/// else a noop.
 	///
@@ -361,28 +414,38 @@ pub trait ElectionDataProvider {
 	/// Clear all voters and targets.
 	#[cfg(any(feature = "runtime-benchmarks", test))]
 	fn clear() {}
+
+	#[cfg(any(feature = "runtime-benchmarks", test))]
+	fn set_desired_targets(_count: u32) {}
 }
 
-/// Base trait for types that can provide election
-pub trait ElectionProviderBase {
-	/// The account identifier type.
+/// Something that can compute the result of an election and pass it back to the caller in a paged
+/// way.
+pub trait ElectionProvider {
+	/// The account identifier type.
 	type AccountId;
 
 	/// The block number type.
 	type BlockNumber;
 
-	/// The error type that is returned by the provider.
-	type Error: Debug;
+	/// The error type returned by the provider.
+	type Error: Debug + PartialEq;
 
-	/// The upper bound on election winners that can be returned.
+	/// The maximum number of winners per page in results returned by this election provider.
 	///
-	/// # WARNING
+	/// A winner is an `AccountId` that is part of the final election result.
+	type MaxWinnersPerPage: Get<u32>;
+
+	/// The maximum number of backers that a single page may have in results returned by this
+	/// election provider.
 	///
-	/// when communicating with the data provider, one must ensure that
-	/// `DataProvider::desired_targets` returns a value less than this bound. An
-	/// implementation can chose to either return an error and/or sort and
-	/// truncate the output to meet this bound.
-	type MaxWinners: Get<u32>;
+	/// A backer is an `AccountId` that "backs" one or more winners. For example, in the context of
+	/// nominated proof of stake, a backer is a voter that nominates a winner validator in the
+	/// election result.
+	type MaxBackersPerWinner: Get<u32>;
+
+	/// The number of pages that this election provider supports.
+	type Pages: Get<PageIndex>;
 
 	/// The data provider of the election.
 	type DataProvider: ElectionDataProvider<
@@ -390,92 +453,108 @@ pub trait ElectionProviderBase {
 		BlockNumber = Self::BlockNumber,
 	>;
 
+	/// Elect a new set of winners.
+	///
+	/// A complete election may require multiple calls to [`ElectionProvider::elect`] if
+	/// [`ElectionProvider::Pages`] is higher than one.
+	///
+	/// The result is returned in a target major format, namely as vector of supports.
+	///
+	/// This should be implemented as a self-weighing function. The implementor should register its
+	/// appropriate weight at the end of execution with the system pallet directly.
+	fn elect(page: PageIndex) -> Result<BoundedSupportsOf<Self>, Self::Error>;
+
+	/// The index of the *most* significant page that this election provider supports.
+	fn msp() -> PageIndex {
+		Self::Pages::get().saturating_sub(1)
+	}
+
+	/// The index of the *least* significant page that this election provider supports.
+	fn lsp() -> PageIndex {
+		Zero::zero()
+	}
+
 	/// checked call to `Self::DataProvider::desired_targets()` ensuring the value never exceeds
-	/// [`Self::MaxWinners`].
+	/// [`Self::MaxWinnersPerPage`].
 	fn desired_targets_checked() -> data_provider::Result<u32> {
 		Self::DataProvider::desired_targets().and_then(|desired_targets| {
-			if desired_targets <= Self::MaxWinners::get() {
+			if desired_targets <= Self::MaxWinnersPerPage::get() {
 				Ok(desired_targets)
 			} else {
 				Err("desired_targets must not be greater than MaxWinners.")
 			}
 		})
 	}
-}
 
-/// Elect a new set of winners, bounded by `MaxWinners`.
-///
-/// It must always use [`ElectionProviderBase::DataProvider`] to fetch the data it needs.
-///
-/// This election provider that could function asynchronously. This implies that this election might
-/// needs data ahead of time (ergo, receives no arguments to `elect`), and might be `ongoing` at
-/// times.
-pub trait ElectionProvider: ElectionProviderBase {
-	/// Indicate if this election provider is currently ongoing an asynchronous election or not.
+	/// Indicate whether this election provider is currently ongoing an asynchronous election.
 	fn ongoing() -> bool;
-
-	/// Performs the election. This should be implemented as a self-weighing function. The
-	/// implementor should register its appropriate weight at the end of execution with the
-	/// system pallet directly.
-	fn elect() -> Result<BoundedSupportsOf<Self>, Self::Error>;
 }
 
 /// A (almost) marker trait that signifies an election provider as working synchronously. i.e. being
 /// *instant*.
 ///
-/// This must still use the same data provider as with [`ElectionProviderBase::DataProvider`].
+/// This must still use the same data provider as with [`ElectionProvider::DataProvider`].
 /// However, it can optionally overwrite the amount of voters and targets that are fetched from the
 /// data provider at runtime via `forced_input_voters_bound` and `forced_input_target_bound`.
-pub trait InstantElectionProvider: ElectionProviderBase {
+pub trait InstantElectionProvider: ElectionProvider {
 	fn instant_elect(
-		forced_input_voters_bound: DataProviderBounds,
-		forced_input_target_bound: DataProviderBounds,
+		voters: Vec<VoterOf<Self::DataProvider>>,
+		targets: Vec<Self::AccountId>,
+		desired_targets: u32,
 	) -> Result<BoundedSupportsOf<Self>, Self::Error>;
+
+	// Since many instant election providers, like [`NoElection`], are meant to do nothing, this is
+	// a hint for the caller to check first; if `false` is returned, it need not bother gathering
+	// and passing all the info to `instant_elect`.
+	fn bother() -> bool;
 }
 
 /// An election provider that does nothing whatsoever.
 pub struct NoElection<X>(core::marker::PhantomData<X>);
 
-impl<AccountId, BlockNumber, DataProvider, MaxWinners> ElectionProviderBase
-	for NoElection<(AccountId, BlockNumber, DataProvider, MaxWinners)>
+impl<AccountId, BlockNumber, DataProvider, MaxWinnersPerPage, MaxBackersPerWinner> ElectionProvider
+	for NoElection<(AccountId, BlockNumber, DataProvider, MaxWinnersPerPage, MaxBackersPerWinner)>
 where
 	DataProvider: ElectionDataProvider<AccountId = AccountId, BlockNumber = BlockNumber>,
-	MaxWinners: Get<u32>,
+	MaxWinnersPerPage: Get<u32>,
+	MaxBackersPerWinner: Get<u32>,
 {
 	type AccountId = AccountId;
 	type BlockNumber = BlockNumber;
 	type Error = &'static str;
-	type MaxWinners = MaxWinners;
+	type Pages = ConstU32<1>;
 	type DataProvider = DataProvider;
-}
+	type MaxWinnersPerPage = MaxWinnersPerPage;
+	type MaxBackersPerWinner = MaxBackersPerWinner;
 
-impl<AccountId, BlockNumber, DataProvider, MaxWinners> ElectionProvider
-	for NoElection<(AccountId, BlockNumber, DataProvider, MaxWinners)>
-where
-	DataProvider: ElectionDataProvider<AccountId = AccountId, BlockNumber = BlockNumber>,
-	MaxWinners: Get<u32>,
-{
-	fn ongoing() -> bool {
-		false
+	fn elect(_page: PageIndex) -> Result<BoundedSupportsOf<Self>, Self::Error> {
+		Err("`NoElection` cannot do anything.")
 	}
 
-	fn elect() -> Result<BoundedSupportsOf<Self>, Self::Error> {
-		Err("`NoElection` cannot do anything.")
+	fn ongoing() -> bool {
+		false
 	}
 }
 
-impl<AccountId, BlockNumber, DataProvider, MaxWinners> InstantElectionProvider
-	for NoElection<(AccountId, BlockNumber, DataProvider, MaxWinners)>
+impl<AccountId, BlockNumber, DataProvider, MaxWinnersPerPage, MaxBackersPerWinner>
+	InstantElectionProvider
+	for NoElection<(AccountId, BlockNumber, DataProvider, MaxWinnersPerPage, MaxBackersPerWinner)>
 where
 	DataProvider: ElectionDataProvider<AccountId = AccountId, BlockNumber = BlockNumber>,
-	MaxWinners: Get<u32>,
+	MaxWinnersPerPage: Get<u32>,
+	MaxBackersPerWinner: Get<u32>,
 {
 	fn instant_elect(
-		_: DataProviderBounds,
-		_: DataProviderBounds,
+		_: Vec<VoterOf<Self::DataProvider>>,
+		_: Vec<Self::AccountId>,
+		_: u32,
 	) -> Result<BoundedSupportsOf<Self>, Self::Error> {
 		Err("`NoElection` cannot do anything.")
 	}
+
+	fn bother() -> bool {
+		false
+	}
 }
 
 /// A utility trait for something to implement `ElectionDataProvider` in a sensible way.
@@ -607,7 +686,11 @@ pub trait NposSolver {
 	fn solve(
 		to_elect: usize,
 		targets: Vec<Self::AccountId>,
-		voters: Vec<(Self::AccountId, VoteWeight, impl IntoIterator<Item = Self::AccountId>)>,
+		voters: Vec<(
+			Self::AccountId,
+			VoteWeight,
+			impl Clone + IntoIterator<Item = Self::AccountId>,
+		)>,
 	) -> Result<ElectionResult<Self::AccountId, Self::Accuracy>, Self::Error>;
 
 	/// Measure the weight used in the calculation of the solver.
@@ -617,6 +700,70 @@ pub trait NposSolver {
 	fn weight<T: WeightInfo>(voters: u32, targets: u32, vote_degree: u32) -> Weight;
 }
 
+/// A quick and dirty solver that produces a valid but probably worthless election result, and is
+/// fast.
+///
+/// It chooses the first `to_elect` targets as winners, without any further consideration.
+///
+/// Then it iterates over the voters and assigns them to those winners.
+///
+/// It is only meant to be used in benchmarking.
+pub struct QuickDirtySolver<AccountId, Accuracy>(core::marker::PhantomData<(AccountId, Accuracy)>);
+impl<AccountId: IdentifierT, Accuracy: PerThing128> NposSolver
+	for QuickDirtySolver<AccountId, Accuracy>
+{
+	type AccountId = AccountId;
+	type Accuracy = Accuracy;
+	type Error = &'static str;
+
+	fn solve(
+		to_elect: usize,
+		targets: Vec<Self::AccountId>,
+		voters: Vec<(
+			Self::AccountId,
+			VoteWeight,
+			impl Clone + IntoIterator<Item = Self::AccountId>,
+		)>,
+	) -> Result<ElectionResult<Self::AccountId, Self::Accuracy>, Self::Error> {
+		use sp_std::collections::btree_map::BTreeMap;
+
+		if to_elect > targets.len() {
+			return Err("to_elect is greater than the number of targets.");
+		}
+
+		let winners = targets.into_iter().take(to_elect).collect::<Vec<_>>();
+
+		let mut assignments = Vec::with_capacity(voters.len());
+		let mut final_winners = BTreeMap::<Self::AccountId, u128>::new();
+
+		for (voter, weight, votes) in voters {
+			let our_winners = winners
+				.iter()
+				.filter(|w| votes.clone().into_iter().any(|v| v == **w))
+				.collect::<Vec<_>>();
+			let our_winners_len = our_winners.len();
+			let distribution = our_winners
+				.into_iter()
+				.map(|w| {
+					*final_winners.entry(w.clone()).or_default() += weight as u128;
+					(w.clone(), Self::Accuracy::from_rational(1, our_winners_len as u128))
+				})
+				.collect::<Vec<_>>();
+
+			let mut assignment = Assignment { who: voter, distribution };
+			assignment.try_normalize().unwrap();
+			assignments.push(assignment);
+		}
+
+		let winners = final_winners.into_iter().collect::<Vec<_>>();
+		Ok(ElectionResult { winners, assignments })
+	}
+
+	fn weight<T: WeightInfo>(_: u32, _: u32, _: u32) -> Weight {
+		Default::default()
+	}
+}
+
 /// A wrapper for [`sp_npos_elections::seq_phragmen`] that implements [`NposSolver`]. See the
 /// documentation of [`sp_npos_elections::seq_phragmen`] for more info.
 pub struct SequentialPhragmen<AccountId, Accuracy, Balancing = ()>(
@@ -632,7 +779,11 @@ impl<AccountId: IdentifierT, Accuracy: PerThing128, Balancing: Get<Option<Balanc
 	fn solve(
 		winners: usize,
 		targets: Vec<Self::AccountId>,
-		voters: Vec<(Self::AccountId, VoteWeight, impl IntoIterator<Item = Self::AccountId>)>,
+		voters: Vec<(
+			Self::AccountId,
+			VoteWeight,
+			impl Clone + IntoIterator<Item = Self::AccountId>,
+		)>,
 	) -> Result<ElectionResult<Self::AccountId, Self::Accuracy>, Self::Error> {
 		sp_npos_elections::seq_phragmen(winners, targets, voters, Balancing::get())
 	}
@@ -657,7 +808,11 @@ impl<AccountId: IdentifierT, Accuracy: PerThing128, Balancing: Get<Option<Balanc
 	fn solve(
 		winners: usize,
 		targets: Vec<Self::AccountId>,
-		voters: Vec<(Self::AccountId, VoteWeight, impl IntoIterator<Item = Self::AccountId>)>,
+		voters: Vec<(
+			Self::AccountId,
+			VoteWeight,
+			impl Clone + IntoIterator<Item = Self::AccountId>,
+		)>,
 	) -> Result<ElectionResult<Self::AccountId, Self::Accuracy>, Self::Error> {
 		sp_npos_elections::phragmms(winners, targets, voters, Balancing::get())
 	}
@@ -674,10 +829,301 @@ pub type Voter<AccountId, Bound> = (AccountId, VoteWeight, BoundedVec<AccountId,
 pub type VoterOf<D> =
 	Voter<<D as ElectionDataProvider>::AccountId, <D as ElectionDataProvider>::MaxVotesPerVoter>;
 
-/// Same as `BoundedSupports` but parameterized by a `ElectionProviderBase`.
+/// A bounded support. Bounded equivalent to [`sp_npos_elections::Support`].
+#[derive(Default, Debug, Encode, Decode, scale_info::TypeInfo, MaxEncodedLen)]
+#[codec(mel_bound(AccountId: MaxEncodedLen, Bound: Get<u32>))]
+#[scale_info(skip_type_params(Bound))]
+pub struct BoundedSupport<AccountId, Bound: Get<u32>> {
+	/// Total support.
+	pub total: ExtendedBalance,
+	/// Support from voters.
+	pub voters: BoundedVec<(AccountId, ExtendedBalance), Bound>,
+}
+
+impl<AccountId, Bound: Get<u32>> sp_npos_elections::Backings for &BoundedSupport<AccountId, Bound> {
+	fn total(&self) -> ExtendedBalance {
+		self.total
+	}
+}
+
+impl<AccountId: PartialEq, Bound: Get<u32>> PartialEq for BoundedSupport<AccountId, Bound> {
+	fn eq(&self, other: &Self) -> bool {
+		self.total == other.total && self.voters == other.voters
+	}
+}
+
+impl<AccountId, Bound: Get<u32>> From<BoundedSupport<AccountId, Bound>> for Support<AccountId> {
+	fn from(b: BoundedSupport<AccountId, Bound>) -> Self {
+		Support { total: b.total, voters: b.voters.into_inner() }
+	}
+}
+
+impl<AccountId: Clone, Bound: Get<u32>> Clone for BoundedSupport<AccountId, Bound> {
+	fn clone(&self) -> Self {
+		Self { voters: self.voters.clone(), total: self.total }
+	}
+}
+
+impl<AccountId, Bound: Get<u32>> TryFrom<sp_npos_elections::Support<AccountId>>
+	for BoundedSupport<AccountId, Bound>
+{
+	type Error = &'static str;
+	fn try_from(s: sp_npos_elections::Support<AccountId>) -> Result<Self, Self::Error> {
+		let voters = s.voters.try_into().map_err(|_| "voters bound not respected")?;
+		Ok(Self { voters, total: s.total })
+	}
+}
+
+impl<AccountId: Clone, Bound: Get<u32>> BoundedSupport<AccountId, Bound> {
+	pub fn sorted_truncate_from(mut support: sp_npos_elections::Support<AccountId>) -> (Self, u32) {
+		// If bounds meet, then short circuit.
+		if let Ok(bounded) = support.clone().try_into() {
+			return (bounded, 0)
+		}
+
+		let pre_len = support.voters.len();
+		// sort support based on stake of each backer, low to high.
+		// Note: we don't sort high to low and truncate because we would have to track `total`
+		// updates, so we need one iteration anyhow.
+		support.voters.sort_by(|a, b| a.1.cmp(&b.1));
+		// then do the truncation.
+		let mut bounded = Self { voters: Default::default(), total: 0 };
+		while let Some((voter, weight)) = support.voters.pop() {
+			if let Err(_) = bounded.voters.try_push((voter, weight)) {
+				break
+			}
+			bounded.total += weight;
+		}
+		let post_len = bounded.voters.len();
+		(bounded, (pre_len - post_len) as u32)
+	}
+}
+
+/// A bounded vector of [`BoundedSupport`].
+///
+/// A [`BoundedSupports`] is a set of [`sp_npos_elections::Supports`] which are bounded in two
+/// dimensions. `BInner` corresponds to the bound of the maximum backers per winner and `BOuter`
+/// corresponds to the bound of the maximum winners that the bounded supports may contain.
+///
+/// With the bounds, we control the maximum size of a bounded supports instance.
+#[derive(Encode, Decode, TypeInfo, DefaultNoBound, MaxEncodedLen)]
+#[codec(mel_bound(AccountId: MaxEncodedLen, BOuter: Get<u32>, BInner: Get<u32>))]
+#[scale_info(skip_type_params(BOuter, BInner))]
+pub struct BoundedSupports<AccountId, BOuter: Get<u32>, BInner: Get<u32>>(
+	pub BoundedVec<(AccountId, BoundedSupport<AccountId, BInner>), BOuter>,
+);
+
+/// Try and build yourself from another `BoundedSupports` with a different set of types.
+pub trait TryFromOtherBounds<AccountId, BOtherOuter: Get<u32>, BOtherInner: Get<u32>> {
+	fn try_from_other_bounds(
+		other: BoundedSupports<AccountId, BOtherOuter, BOtherInner>,
+	) -> Result<Self, crate::Error>
+	where
+		Self: Sized;
+}
+
+impl<
+		AccountId,
+		BOuter: Get<u32>,
+		BInner: Get<u32>,
+		BOtherOuter: Get<u32>,
+		BOuterInner: Get<u32>,
+	> TryFromOtherBounds<AccountId, BOtherOuter, BOuterInner>
+	for BoundedSupports<AccountId, BOuter, BInner>
+{
+	fn try_from_other_bounds(
+		other: BoundedSupports<AccountId, BOtherOuter, BOuterInner>,
+	) -> Result<Self, crate::Error> {
+		// TODO: we might as well do this with unsafe rust and do it faster.
+		if BOtherOuter::get() <= BOuter::get() && BInner::get() <= BOuterInner::get() {
+			let supports = other
+				.into_iter()
+				.map(|(acc, b_support)| {
+					b_support
+						.try_into()
+						.defensive_map_err(|_| Error::BoundsExceeded)
+						.map(|b_support| (acc, b_support))
+				})
+				.collect::<Result<Vec<_>, _>>()
+				.defensive()?;
+			supports.try_into()
+		} else {
+			Err(crate::Error::BoundsExceeded)
+		}
+	}
+}
+
+impl<AccountId: Clone, BOuter: Get<u32>, BInner: Get<u32>>
+	BoundedSupports<AccountId, BOuter, BInner>
+{
+	/// Two u32s returned are number of winners and backers removed respectively.
+	pub fn sorted_truncate_from(supports: Supports<AccountId>) -> (Self, u32, u32) {
+		// if the bounds are already met, short circuit.
+		if let Ok(bounded) = supports.clone().try_into() {
+			return (bounded, 0, 0)
+		}
+
+		let pre_winners = supports.len();
+		let mut backers_removed = 0;
+		// first, convert all inner supports.
+		let mut inner_supports = supports
+			.into_iter()
+			.map(|(account, support)| {
+				let (bounded, removed) =
+					BoundedSupport::<AccountId, BInner>::sorted_truncate_from(support);
+				backers_removed += removed;
+				(account, bounded)
+			})
+			.collect::<Vec<_>>();
+
+		// then sort outer supports based on total stake, high to low
+		inner_supports.sort_by(|a, b| b.1.total.cmp(&a.1.total));
+
+		// then take the first slice that can fit.
+		let bounded = BoundedSupports(BoundedVec::<
+			(AccountId, BoundedSupport<AccountId, BInner>),
+			BOuter,
+		>::truncate_from(inner_supports));
+		let post_winners = bounded.len();
+		(bounded, (pre_winners - post_winners) as u32, backers_removed)
+	}
+}
+pub trait TryFromUnboundedPagedSupports<AccountId, BOuter: Get<u32>, BInner: Get<u32>> {
+	fn try_from_unbounded_paged(
+		self,
+	) -> Result<Vec<BoundedSupports<AccountId, BOuter, BInner>>, crate::Error>
+	where
+		Self: Sized;
+}
+
+impl<AccountId, BOuter: Get<u32>, BInner: Get<u32>>
+	TryFromUnboundedPagedSupports<AccountId, BOuter, BInner> for Vec<Supports<AccountId>>
+{
+	fn try_from_unbounded_paged(
+		self,
+	) -> Result<Vec<BoundedSupports<AccountId, BOuter, BInner>>, crate::Error> {
+		self.into_iter()
+			.map(|s| s.try_into().map_err(|_| crate::Error::BoundsExceeded))
+			.collect::<Result<Vec<_>, _>>()
+	}
+}
+
+impl<AccountId, BOuter: Get<u32>, BInner: Get<u32>> sp_npos_elections::EvaluateSupport
+	for BoundedSupports<AccountId, BOuter, BInner>
+{
+	fn evaluate(&self) -> sp_npos_elections::ElectionScore {
+		sp_npos_elections::evaluate_support(self.iter().map(|(_, s)| s))
+	}
+}
+
+impl<AccountId, BOuter: Get<u32>, BInner: Get<u32>> sp_std::ops::DerefMut
+	for BoundedSupports<AccountId, BOuter, BInner>
+{
+	fn deref_mut(&mut self) -> &mut Self::Target {
+		&mut self.0
+	}
+}
+
+impl<AccountId: Debug, BOuter: Get<u32>, BInner: Get<u32>> Debug
+	for BoundedSupports<AccountId, BOuter, BInner>
+{
+	fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+		for s in self.0.iter() {
+			write!(f, "({:?}, {:?}, {:?}) ", s.0, s.1.total, s.1.voters)?;
+		}
+		Ok(())
+	}
+}
+
+impl<AccountId: PartialEq, BOuter: Get<u32>, BInner: Get<u32>> PartialEq
+	for BoundedSupports<AccountId, BOuter, BInner>
+{
+	fn eq(&self, other: &Self) -> bool {
+		self.0 == other.0
+	}
+}
+
+impl<AccountId, BOuter: Get<u32>, BInner: Get<u32>> Into<Supports<AccountId>>
+	for BoundedSupports<AccountId, BOuter, BInner>
+{
+	fn into(self) -> Supports<AccountId> {
+		// TODO: can be done faster with unsafe code.
+		self.0.into_iter().map(|(acc, b_support)| (acc, b_support.into())).collect()
+	}
+}
+
+impl<AccountId, BOuter: Get<u32>, BInner: Get<u32>>
+	From<BoundedVec<(AccountId, BoundedSupport<AccountId, BInner>), BOuter>>
+	for BoundedSupports<AccountId, BOuter, BInner>
+{
+	fn from(t: BoundedVec<(AccountId, BoundedSupport<AccountId, BInner>), BOuter>) -> Self {
+		Self(t)
+	}
+}
+
+impl<AccountId: Clone, BOuter: Get<u32>, BInner: Get<u32>> Clone
+	for BoundedSupports<AccountId, BOuter, BInner>
+{
+	fn clone(&self) -> Self {
+		Self(self.0.clone())
+	}
+}
+
+impl<AccountId, BOuter: Get<u32>, BInner: Get<u32>> sp_std::ops::Deref
+	for BoundedSupports<AccountId, BOuter, BInner>
+{
+	type Target = BoundedVec<(AccountId, BoundedSupport<AccountId, BInner>), BOuter>;
+
+	fn deref(&self) -> &Self::Target {
+		&self.0
+	}
+}
+
+impl<AccountId, BOuter: Get<u32>, BInner: Get<u32>> IntoIterator
+	for BoundedSupports<AccountId, BOuter, BInner>
+{
+	type Item = (AccountId, BoundedSupport<AccountId, BInner>);
+	type IntoIter = sp_std::vec::IntoIter<Self::Item>;
+
+	fn into_iter(self) -> Self::IntoIter {
+		self.0.into_iter()
+	}
+}
+
+impl<AccountId, BOuter: Get<u32>, BInner: Get<u32>> TryFrom<Supports<AccountId>>
+	for BoundedSupports<AccountId, BOuter, BInner>
+{
+	type Error = crate::Error;
+
+	fn try_from(supports: Supports<AccountId>) -> Result<Self, Self::Error> {
+		// optimization note: pre-allocate outer bounded vec.
+		let mut outer_bounded_supports = BoundedVec::<
+			(AccountId, BoundedSupport<AccountId, BInner>),
+			BOuter,
+		>::with_bounded_capacity(
+			supports.len().min(BOuter::get() as usize)
+		);
+
+		// optimization note: avoid intermediate allocations.
+		supports
+			.into_iter()
+			.map(|(account, support)| (account, support.try_into().map_err(|_| ())))
+			.try_for_each(|(account, maybe_bounded_supports)| {
+				outer_bounded_supports
+					.try_push((account, maybe_bounded_supports?))
+					.map_err(|_| ())
+			})
+			.map_err(|_| crate::Error::BoundsExceeded)?;
+
+		Ok(outer_bounded_supports.into())
+	}
+}
+
+/// Same as `BoundedSupports` but parameterized by an `ElectionProvider`.
 pub type BoundedSupportsOf<E> = BoundedSupports<
-	<E as ElectionProviderBase>::AccountId,
-	<E as ElectionProviderBase>::MaxWinners,
+	<E as ElectionProvider>::AccountId,
+	<E as ElectionProvider>::MaxWinnersPerPage,
+	<E as ElectionProvider>::MaxBackersPerWinner,
 >;
 
 sp_core::generate_feature_enabled_macro!(
diff --git a/substrate/frame/election-provider-support/src/onchain.rs b/substrate/frame/election-provider-support/src/onchain.rs
index 1063d5d35ae..3478eec6c9d 100644
--- a/substrate/frame/election-provider-support/src/onchain.rs
+++ b/substrate/frame/election-provider-support/src/onchain.rs
@@ -20,27 +20,27 @@
 //! careful when using it onchain.
 
 use crate::{
-	bounds::{DataProviderBounds, ElectionBounds, ElectionBoundsBuilder},
-	BoundedSupportsOf, Debug, ElectionDataProvider, ElectionProvider, ElectionProviderBase,
-	InstantElectionProvider, NposSolver, WeightInfo,
+	bounds::{ElectionBounds, ElectionBoundsBuilder},
+	BoundedSupportsOf, Debug, ElectionDataProvider, ElectionProvider, InstantElectionProvider,
+	NposSolver, PageIndex, VoterOf, WeightInfo,
 };
-use alloc::collections::btree_map::BTreeMap;
+use alloc::{collections::btree_map::BTreeMap, vec::Vec};
 use core::marker::PhantomData;
 use frame_support::{dispatch::DispatchClass, traits::Get};
+use frame_system::pallet_prelude::BlockNumberFor;
 use sp_npos_elections::{
-	assignment_ratio_to_staked_normalized, to_supports, BoundedSupports, ElectionResult, VoteWeight,
+	assignment_ratio_to_staked_normalized, to_supports, ElectionResult, VoteWeight,
 };
 
 /// Errors of the on-chain election.
-#[derive(Eq, PartialEq, Debug)]
+#[derive(Eq, PartialEq, Debug, Clone)]
 pub enum Error {
 	/// An internal error in the NPoS elections crate.
 	NposElections(sp_npos_elections::Error),
 	/// Errors from the data provider.
 	DataProvider(&'static str),
-	/// Configurational error caused by `desired_targets` requested by data provider exceeding
-	/// `MaxWinners`.
-	TooManyWinners,
+	/// Results failed to meet the bounds.
+	FailedToBound,
 }
 
 impl From<sp_npos_elections::Error> for Error {
@@ -62,6 +62,12 @@ pub type BoundedExecution<T> = OnChainExecution<T>;
 
 /// Configuration trait for an onchain election execution.
 pub trait Config {
+	/// Whether to try and sort or not.
+	///
+	/// If `true`, the supports will be sorted by descending total support to meet the bounds. If
+	/// `false`, `FailedToBound` error may be returned.
+	type Sort: Get<bool>;
+
 	/// Needed for weight registration.
 	type System: frame_system::Config;
 
@@ -71,6 +77,18 @@ pub trait Config {
 		Error = sp_npos_elections::Error,
 	>;
 
+	/// Maximum number of backers allowed per target.
+	///
+	/// If the bounds are exceeded due to the data returned by the data provider, the election will
+	/// fail.
+	type MaxBackersPerWinner: Get<u32>;
+
+	/// Maximum number of winners in an election.
+	///
+	/// If the bounds are exceeded due to the data returned by the data provider, the election will
+	/// fail.
+	type MaxWinnersPerPage: Get<u32>;
+
 	/// Something that provides the data for election.
 	type DataProvider: ElectionDataProvider<
 		AccountId = <Self::System as frame_system::Config>::AccountId,
@@ -80,102 +98,106 @@ pub trait Config {
 	/// Weight information for extrinsics in this pallet.
 	type WeightInfo: WeightInfo;
 
-	/// Upper bound on maximum winners from electable targets.
-	///
-	/// As noted in the documentation of [`ElectionProviderBase::MaxWinners`], this value should
-	/// always be more than `DataProvider::desired_target`.
-	type MaxWinners: Get<u32>;
-
 	/// Elections bounds, to use when calling into [`Config::DataProvider`]. It might be overwritten
 	/// in the `InstantElectionProvider` impl.
 	type Bounds: Get<ElectionBounds>;
 }
 
-/// Same as `BoundedSupportsOf` but for `onchain::Config`.
-pub type OnChainBoundedSupportsOf<E> = BoundedSupports<
-	<<E as Config>::System as frame_system::Config>::AccountId,
-	<E as Config>::MaxWinners,
->;
-
-fn elect_with_input_bounds<T: Config>(
-	bounds: ElectionBounds,
-) -> Result<OnChainBoundedSupportsOf<T>, Error> {
-	let (voters, targets) = T::DataProvider::electing_voters(bounds.voters)
-		.and_then(|voters| Ok((voters, T::DataProvider::electable_targets(bounds.targets)?)))
-		.map_err(Error::DataProvider)?;
-
-	let desired_targets = T::DataProvider::desired_targets().map_err(Error::DataProvider)?;
+impl<T: Config> OnChainExecution<T> {
+	fn elect_with_snapshot(
+		voters: Vec<VoterOf<T::DataProvider>>,
+		targets: Vec<<T::System as frame_system::Config>::AccountId>,
+		desired_targets: u32,
+	) -> Result<BoundedSupportsOf<Self>, Error> {
+		if (desired_targets > T::MaxWinnersPerPage::get()) && !T::Sort::get() {
+			// early exit: this would fail in the last line anyway.
+			return Err(Error::FailedToBound)
+		}
 
-	if desired_targets > T::MaxWinners::get() {
-		// early exit
-		return Err(Error::TooManyWinners)
+		let voters_len = voters.len() as u32;
+		let targets_len = targets.len() as u32;
+
+		let stake_map: BTreeMap<_, _> = voters
+			.iter()
+			.map(|(validator, vote_weight, _)| (validator.clone(), *vote_weight))
+			.collect();
+
+		let stake_of = |w: &<T::System as frame_system::Config>::AccountId| -> VoteWeight {
+			stake_map.get(w).cloned().unwrap_or_default()
+		};
+
+		let ElectionResult { winners: _, assignments } =
+			T::Solver::solve(desired_targets as usize, targets, voters).map_err(Error::from)?;
+
+		let staked = assignment_ratio_to_staked_normalized(assignments, &stake_of)?;
+
+		let weight = T::Solver::weight::<T::WeightInfo>(
+			voters_len,
+			targets_len,
+			<T::DataProvider as ElectionDataProvider>::MaxVotesPerVoter::get(),
+		);
+		frame_system::Pallet::<T::System>::register_extra_weight_unchecked(
+			weight,
+			DispatchClass::Mandatory,
+		);
+
+		let unbounded = to_supports(&staked);
+		let bounded = if T::Sort::get() {
+			let (bounded, _winners_removed, _backers_removed) =
+				BoundedSupportsOf::<Self>::sorted_truncate_from(unbounded);
+			bounded
+		} else {
+			unbounded.try_into().map_err(|_| Error::FailedToBound)?
+		};
+		Ok(bounded)
 	}
 
-	let voters_len = voters.len() as u32;
-	let targets_len = targets.len() as u32;
-
-	let stake_map: BTreeMap<_, _> = voters
-		.iter()
-		.map(|(validator, vote_weight, _)| (validator.clone(), *vote_weight))
-		.collect();
-
-	let stake_of = |w: &<T::System as frame_system::Config>::AccountId| -> VoteWeight {
-		stake_map.get(w).cloned().unwrap_or_default()
-	};
-
-	let ElectionResult { winners: _, assignments } =
-		T::Solver::solve(desired_targets as usize, targets, voters).map_err(Error::from)?;
-
-	let staked = assignment_ratio_to_staked_normalized(assignments, &stake_of)?;
-
-	let weight = T::Solver::weight::<T::WeightInfo>(
-		voters_len,
-		targets_len,
-		<T::DataProvider as ElectionDataProvider>::MaxVotesPerVoter::get(),
-	);
-	frame_system::Pallet::<T::System>::register_extra_weight_unchecked(
-		weight,
-		DispatchClass::Mandatory,
-	);
-
-	// defensive: Since npos solver returns a result always bounded by `desired_targets`, this is
-	// never expected to happen as long as npos solver does what is expected for it to do.
-	let supports: OnChainBoundedSupportsOf<T> =
-		to_supports(&staked).try_into().map_err(|_| Error::TooManyWinners)?;
-
-	Ok(supports)
-}
-
-impl<T: Config> ElectionProviderBase for OnChainExecution<T> {
-	type AccountId = <T::System as frame_system::Config>::AccountId;
-	type BlockNumber = frame_system::pallet_prelude::BlockNumberFor<T::System>;
-	type Error = Error;
-	type MaxWinners = T::MaxWinners;
-	type DataProvider = T::DataProvider;
+	fn elect_with(
+		bounds: ElectionBounds,
+		page: PageIndex,
+	) -> Result<BoundedSupportsOf<Self>, Error> {
+		let (voters, targets) = T::DataProvider::electing_voters(bounds.voters, page)
+			.and_then(|voters| {
+				Ok((voters, T::DataProvider::electable_targets(bounds.targets, page)?))
+			})
+			.map_err(Error::DataProvider)?;
+		let desired_targets = T::DataProvider::desired_targets().map_err(Error::DataProvider)?;
+		Self::elect_with_snapshot(voters, targets, desired_targets)
+	}
 }
 
 impl<T: Config> InstantElectionProvider for OnChainExecution<T> {
 	fn instant_elect(
-		forced_input_voters_bounds: DataProviderBounds,
-		forced_input_targets_bounds: DataProviderBounds,
+		voters: Vec<VoterOf<T::DataProvider>>,
+		targets: Vec<<T::System as frame_system::Config>::AccountId>,
+		desired_targets: u32,
 	) -> Result<BoundedSupportsOf<Self>, Self::Error> {
-		let elections_bounds = ElectionBoundsBuilder::from(T::Bounds::get())
-			.voters_or_lower(forced_input_voters_bounds)
-			.targets_or_lower(forced_input_targets_bounds)
-			.build();
+		Self::elect_with_snapshot(voters, targets, desired_targets)
+	}
 
-		elect_with_input_bounds::<T>(elections_bounds)
+	fn bother() -> bool {
+		true
 	}
 }
 
 impl<T: Config> ElectionProvider for OnChainExecution<T> {
-	fn ongoing() -> bool {
-		false
-	}
+	type AccountId = <T::System as frame_system::Config>::AccountId;
+	type BlockNumber = BlockNumberFor<T::System>;
+	type Error = Error;
+	type MaxWinnersPerPage = T::MaxWinnersPerPage;
+	type MaxBackersPerWinner = T::MaxBackersPerWinner;
+	// This provider is meant to be called "instantly", producing its full result in one go, so a
+	// single page is used.
+	type Pages = sp_core::ConstU32<1>;
+	type DataProvider = T::DataProvider;
 
-	fn elect() -> Result<BoundedSupportsOf<Self>, Self::Error> {
+	fn elect(page: PageIndex) -> Result<BoundedSupportsOf<Self>, Self::Error> {
 		let election_bounds = ElectionBoundsBuilder::from(T::Bounds::get()).build();
-		elect_with_input_bounds::<T>(election_bounds)
+		Self::elect_with(election_bounds, page)
+	}
+
+	fn ongoing() -> bool {
+		false
 	}
 }
 
@@ -184,6 +206,7 @@ mod tests {
 	use super::*;
 	use crate::{ElectionProvider, PhragMMS, SequentialPhragmen};
 	use frame_support::{assert_noop, derive_impl, parameter_types};
+	use sp_io::TestExternalities;
 	use sp_npos_elections::Support;
 	use sp_runtime::Perbill;
 	type AccountId = u64;
@@ -231,42 +254,50 @@ mod tests {
 	struct PhragMMSParams;
 
 	parameter_types! {
-		pub static MaxWinners: u32 = 10;
+		pub static MaxWinnersPerPage: u32 = 10;
+		pub static MaxBackersPerWinner: u32 = 20;
 		pub static DesiredTargets: u32 = 2;
+		pub static Sort: bool = false;
 		pub static Bounds: ElectionBounds = ElectionBoundsBuilder::default().voters_count(600.into()).targets_count(400.into()).build();
 	}
 
 	impl Config for PhragmenParams {
+		type Sort = Sort;
 		type System = Runtime;
 		type Solver = SequentialPhragmen<AccountId, Perbill>;
 		type DataProvider = mock_data_provider::DataProvider;
-		type WeightInfo = ();
-		type MaxWinners = MaxWinners;
+		type MaxWinnersPerPage = MaxWinnersPerPage;
+		type MaxBackersPerWinner = MaxBackersPerWinner;
 		type Bounds = Bounds;
+		type WeightInfo = ();
 	}
 
 	impl Config for PhragMMSParams {
+		type Sort = Sort;
 		type System = Runtime;
 		type Solver = PhragMMS<AccountId, Perbill>;
 		type DataProvider = mock_data_provider::DataProvider;
+		type MaxWinnersPerPage = MaxWinnersPerPage;
+		type MaxBackersPerWinner = MaxBackersPerWinner;
 		type WeightInfo = ();
-		type MaxWinners = MaxWinners;
 		type Bounds = Bounds;
 	}
 
 	mod mock_data_provider {
+		use super::*;
+		use crate::{data_provider, DataProviderBounds, PageIndex, VoterOf};
 		use frame_support::traits::ConstU32;
 		use sp_runtime::bounded_vec;
 
-		use super::*;
-		use crate::{data_provider, VoterOf};
-
 		pub struct DataProvider;
 		impl ElectionDataProvider for DataProvider {
 			type AccountId = AccountId;
 			type BlockNumber = BlockNumber;
 			type MaxVotesPerVoter = ConstU32<2>;
-			fn electing_voters(_: DataProviderBounds) -> data_provider::Result<Vec<VoterOf<Self>>> {
+			fn electing_voters(
+				_: DataProviderBounds,
+				_page: PageIndex,
+			) -> data_provider::Result<Vec<VoterOf<Self>>> {
 				Ok(vec![
 					(1, 10, bounded_vec![10, 20]),
 					(2, 20, bounded_vec![30, 20]),
@@ -274,7 +305,10 @@ mod tests {
 				])
 			}
 
-			fn electable_targets(_: DataProviderBounds) -> data_provider::Result<Vec<AccountId>> {
+			fn electable_targets(
+				_: DataProviderBounds,
+				_page: PageIndex,
+			) -> data_provider::Result<Vec<AccountId>> {
 				Ok(vec![10, 20, 30])
 			}
 
@@ -290,40 +324,101 @@ mod tests {
 
 	#[test]
 	fn onchain_seq_phragmen_works() {
-		sp_io::TestExternalities::new_empty().execute_with(|| {
+		TestExternalities::new_empty().execute_with(|| {
+			let expected_supports = vec![
+				(
+					10 as AccountId,
+					Support { total: 25, voters: vec![(1 as AccountId, 10), (3, 15)] },
+				),
+				(30, Support { total: 35, voters: vec![(2, 20), (3, 15)] }),
+			]
+			.try_into()
+			.unwrap();
+
 			assert_eq!(
-				<OnChainExecution::<PhragmenParams> as ElectionProvider>::elect().unwrap(),
-				vec![
-					(10, Support { total: 25, voters: vec![(1, 10), (3, 15)] }),
-					(30, Support { total: 35, voters: vec![(2, 20), (3, 15)] })
-				]
+				<OnChainExecution::<PhragmenParams> as ElectionProvider>::elect(0).unwrap(),
+				expected_supports,
 			);
 		})
 	}
 
 	#[test]
-	fn too_many_winners_when_desired_targets_exceed_max_winners() {
-		sp_io::TestExternalities::new_empty().execute_with(|| {
-			// given desired targets larger than max winners
-			DesiredTargets::set(10);
-			MaxWinners::set(9);
+	fn sorting_false_works() {
+		TestExternalities::new_empty().execute_with(|| {
+			// Default results would have 3 targets, but we allow for only 2.
+			DesiredTargets::set(3);
+			MaxWinnersPerPage::set(2);
+
+			assert_noop!(
+				<OnChainExecution::<PhragmenParams> as ElectionProvider>::elect(0),
+				Error::FailedToBound,
+			);
+		});
+
+		TestExternalities::new_empty().execute_with(|| {
+			// Default results would have 2 backers per winner
+			MaxBackersPerWinner::set(1);
 
 			assert_noop!(
-				<OnChainExecution::<PhragmenParams> as ElectionProvider>::elect(),
-				Error::TooManyWinners,
+				<OnChainExecution::<PhragmenParams> as ElectionProvider>::elect(0),
+				Error::FailedToBound,
+			);
+		});
+	}
+
+	#[test]
+	fn sorting_true_works_winners() {
+		Sort::set(true);
+
+		TestExternalities::new_empty().execute_with(|| {
+			let expected_supports =
+				vec![(30, Support { total: 35, voters: vec![(2, 20), (3, 15)] })]
+					.try_into()
+					.unwrap();
+
+			// we want to allow 1 winner only, and allow sorting.
+			MaxWinnersPerPage::set(1);
+
+			assert_eq!(
+				<OnChainExecution::<PhragmenParams> as ElectionProvider>::elect(0).unwrap(),
+				expected_supports,
+			);
+		});
+
+		MaxWinnersPerPage::set(10);
+
+		TestExternalities::new_empty().execute_with(|| {
+			let expected_supports = vec![
+				(30, Support { total: 20, voters: vec![(2, 20)] }),
+				(10 as AccountId, Support { total: 15, voters: vec![(3 as AccountId, 15)] }),
+			]
+			.try_into()
+			.unwrap();
+
+			// we want to allow 2 winners only but 1 backer each, and allow sorting.
+			MaxBackersPerWinner::set(1);
+
+			assert_eq!(
+				<OnChainExecution::<PhragmenParams> as ElectionProvider>::elect(0).unwrap(),
+				expected_supports,
 			);
 		})
 	}
 
 	#[test]
 	fn onchain_phragmms_works() {
-		sp_io::TestExternalities::new_empty().execute_with(|| {
+		TestExternalities::new_empty().execute_with(|| {
 			assert_eq!(
-				<OnChainExecution::<PhragMMSParams> as ElectionProvider>::elect().unwrap(),
+				<OnChainExecution::<PhragMMSParams> as ElectionProvider>::elect(0).unwrap(),
 				vec![
-					(10, Support { total: 25, voters: vec![(1, 10), (3, 15)] }),
+					(
+						10 as AccountId,
+						Support { total: 25, voters: vec![(1 as AccountId, 10), (3, 15)] }
+					),
 					(30, Support { total: 35, voters: vec![(2, 20), (3, 15)] })
 				]
+				.try_into()
+				.unwrap()
 			);
 		})
 	}
diff --git a/substrate/frame/election-provider-support/src/tests.rs b/substrate/frame/election-provider-support/src/tests.rs
index 6e3deb9e383..de4bac3664b 100644
--- a/substrate/frame/election-provider-support/src/tests.rs
+++ b/substrate/frame/election-provider-support/src/tests.rs
@@ -18,10 +18,10 @@
 //! Tests for solution-type.
 
 #![cfg(test)]
-
-use crate::{mock::*, IndexAssignment, NposSolution};
+use crate::{mock::*, BoundedSupports, IndexAssignment, NposSolution};
 use frame_support::traits::ConstU32;
 use rand::SeedableRng;
+use sp_npos_elections::{Support, Supports};
 
 mod solution_type {
 	use super::*;
@@ -452,3 +452,29 @@ fn index_assignments_generate_same_solution_as_plain_assignments() {
 
 	assert_eq!(solution, index_compact);
 }
+
+#[test]
+fn sorted_truncate_from_works() {
+	let supports: Supports<u32> = vec![
+		(1, Support { total: 303, voters: vec![(100, 100), (101, 101), (102, 102)] }),
+		(2, Support { total: 201, voters: vec![(100, 100), (101, 101)] }),
+		(3, Support { total: 406, voters: vec![(100, 100), (101, 101), (102, 102), (103, 103)] }),
+	];
+
+	let (bounded, winners_removed, backers_removed) =
+		BoundedSupports::<u32, ConstU32<2>, ConstU32<2>>::sorted_truncate_from(supports);
+	// we trim 2 as it has least total support, and trim backers based on stake.
+	assert_eq!(
+		bounded
+			.clone()
+			.into_iter()
+			.map(|(k, v)| (k, Support { total: v.total, voters: v.voters.into_inner() }))
+			.collect::<Vec<_>>(),
+		vec![
+			(3, Support { total: 205, voters: vec![(103, 103), (102, 102)] }),
+			(1, Support { total: 203, voters: vec![(102, 102), (101, 101)] })
+		]
+	);
+	assert_eq!(winners_removed, 1);
+	assert_eq!(backers_removed, 3);
+}
diff --git a/substrate/frame/election-provider-support/src/traits.rs b/substrate/frame/election-provider-support/src/traits.rs
index 84fd57992d3..d8ffd41d8ae 100644
--- a/substrate/frame/election-provider-support/src/traits.rs
+++ b/substrate/frame/election-provider-support/src/traits.rs
@@ -42,6 +42,8 @@ where
 		+ Clone
 		+ Bounded
 		+ Encode
+		+ Ord
+		+ PartialOrd
 		+ TypeInfo;
 
 	/// The target type. Needs to be an index (convert to usize).
@@ -53,6 +55,8 @@ where
 		+ Clone
 		+ Bounded
 		+ Encode
+		+ Ord
+		+ PartialOrd
 		+ TypeInfo;
 
 	/// The weight/accuracy type of each vote.
@@ -123,4 +127,23 @@ where
 		voter_at: impl Fn(Self::VoterIndex) -> Option<A>,
 		target_at: impl Fn(Self::TargetIndex) -> Option<A>,
 	) -> Result<Vec<Assignment<A, Self::Accuracy>>, Error>;
+
+	/// Sort self by the means of the given function.
+	///
+	/// This might be helpful to allow for easier trimming.
+	fn sort<F>(&mut self, voter_stake: F)
+	where
+		F: FnMut(&Self::VoterIndex) -> VoteWeight;
+
+	/// Remove the least staked voter.
+	///
+	/// This is ONLY sensible to do if [`Self::sort`] has been called on the struct at least once.
+	fn remove_weakest_sorted<F>(&mut self, voter_stake: F) -> Option<Self::VoterIndex>
+	where
+		F: FnMut(&Self::VoterIndex) -> VoteWeight;
+
+	/// Make this solution corrupt. This should set the index of a voter to `Bounded::max_value()`.
+	///
+	/// Obviously, this is only useful for testing.
+	fn corrupt(&mut self);
 }
diff --git a/substrate/frame/elections-phragmen/src/benchmarking.rs b/substrate/frame/elections-phragmen/src/benchmarking.rs
index 60771fa89ad..6e8850aca9d 100644
--- a/substrate/frame/elections-phragmen/src/benchmarking.rs
+++ b/substrate/frame/elections-phragmen/src/benchmarking.rs
@@ -71,7 +71,10 @@ fn submit_candidates<T: Config>(
 				RawOrigin::Signed(account.clone()).into(),
 				candidate_count::<T>(),
 			)
-			.map_err(|_| "failed to submit candidacy")?;
+			.map_err(|e| {
+				log::error!(target: crate::LOG_TARGET, "failed to submit candidacy: {:?}", e);
+				"failed to submit candidacy"
+			})?;
 			Ok(account)
 		})
 		.collect::<Result<_, _>>()
@@ -152,6 +155,10 @@ mod benchmarks {
 	// -- Signed ones
 	#[benchmark]
 	fn vote_equal(v: Linear<1, { T::MaxVotesPerVoter::get() }>) -> Result<(), BenchmarkError> {
+		assert!(
+			T::MaxCandidates::get() > T::MaxVotesPerVoter::get(),
+			"MaxCandidates should be more than MaxVotesPerVoter"
+		);
 		clean::<T>();
 
 		// create a bunch of candidates.
@@ -459,6 +466,9 @@ mod benchmarks {
 		let all_candidates = submit_candidates_with_self_vote::<T>(c, "candidates")?;
 		let _ =
 			distribute_voters::<T>(all_candidates, v.saturating_sub(c), votes_per_voter as usize)?;
+		log::info!(target: crate::LOG_TARGET, "[v = {:?}]voters: {:?}",v, v.saturating_sub(c));
+		log::info!(target: crate::LOG_TARGET, "votes_per_voter: {:?}",votes_per_voter);
+		log::info!(target: crate::LOG_TARGET, "candidates: {:?}",c);
 
 		#[block]
 		{
diff --git a/substrate/frame/fast-unstake/src/mock.rs b/substrate/frame/fast-unstake/src/mock.rs
index 67f7ee21e61..5d8aed59ff6 100644
--- a/substrate/frame/fast-unstake/src/mock.rs
+++ b/substrate/frame/fast-unstake/src/mock.rs
@@ -16,6 +16,7 @@
 // limitations under the License.
 
 use crate::{self as fast_unstake};
+use frame_election_provider_support::PageIndex;
 use frame_support::{
 	assert_ok, derive_impl,
 	pallet_prelude::*,
@@ -82,25 +83,28 @@ parameter_types! {
 	pub static BondingDuration: u32 = 3;
 	pub static CurrentEra: u32 = 0;
 	pub static Ongoing: bool = false;
-	pub static MaxWinners: u32 = 100;
 }
 
 pub struct MockElection;
-impl frame_election_provider_support::ElectionProviderBase for MockElection {
-	type AccountId = AccountId;
+
+impl frame_election_provider_support::ElectionProvider for MockElection {
 	type BlockNumber = BlockNumber;
-	type MaxWinners = MaxWinners;
+	type AccountId = AccountId;
 	type DataProvider = Staking;
+	type MaxBackersPerWinner = ConstU32<100>;
+	type MaxWinnersPerPage = ConstU32<100>;
+	type Pages = ConstU32<1>;
 	type Error = ();
-}
 
-impl frame_election_provider_support::ElectionProvider for MockElection {
+	fn elect(
+		_remaining_pages: PageIndex,
+	) -> Result<frame_election_provider_support::BoundedSupportsOf<Self>, Self::Error> {
+		Err(())
+	}
+
 	fn ongoing() -> bool {
 		Ongoing::get()
 	}
-	fn elect() -> Result<frame_election_provider_support::BoundedSupportsOf<Self>, Self::Error> {
-		Err(())
-	}
 }
 
 #[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)]
@@ -200,7 +204,7 @@ impl ExtBuilder {
 				(v, Exposure { total: 0, own: 0, others })
 			})
 			.for_each(|(validator, exposure)| {
-				pallet_staking::EraInfo::<T>::set_exposure(era, &validator, exposure);
+				pallet_staking::EraInfo::<T>::upsert_exposure(era, &validator, exposure);
 			});
 	}
 
@@ -300,7 +304,7 @@ pub fn create_exposed_nominator(exposed: AccountId, era: u32) {
 	// create an exposed nominator in passed era
 	let mut exposure = pallet_staking::EraInfo::<T>::get_full_exposure(era, &VALIDATORS_PER_ERA);
 	exposure.others.push(IndividualExposure { who: exposed, value: 0 as Balance });
-	pallet_staking::EraInfo::<T>::set_exposure(era, &VALIDATORS_PER_ERA, exposure);
+	pallet_staking::EraInfo::<T>::upsert_exposure(era, &VALIDATORS_PER_ERA, exposure);
 
 	Balances::make_free_balance_be(&exposed, 100);
 	assert_ok!(Staking::bond(
diff --git a/substrate/frame/grandpa/Cargo.toml b/substrate/frame/grandpa/Cargo.toml
index 4072d65b626..8fe651de43d 100644
--- a/substrate/frame/grandpa/Cargo.toml
+++ b/substrate/frame/grandpa/Cargo.toml
@@ -42,6 +42,7 @@ pallet-staking = { workspace = true, default-features = true }
 pallet-staking-reward-curve = { workspace = true, default-features = true }
 pallet-timestamp = { workspace = true, default-features = true }
 sp-keyring = { workspace = true, default-features = true }
+sp-tracing = { workspace = true, default-features = true }
 
 [features]
 default = ["std"]
diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs
index cb754fb7955..a14bdc9d73b 100644
--- a/substrate/frame/grandpa/src/mock.rs
+++ b/substrate/frame/grandpa/src/mock.rs
@@ -32,14 +32,14 @@ use frame_support::{
 };
 use pallet_session::historical as pallet_session_historical;
 use sp_consensus_grandpa::{RoundNumber, SetId, GRANDPA_ENGINE_ID};
-use sp_core::H256;
+use sp_core::{ConstBool, H256};
 use sp_keyring::Ed25519Keyring;
 use sp_runtime::{
 	curve::PiecewiseLinear,
 	impl_opaque_keys,
 	testing::{TestXt, UintAuthorityId},
 	traits::OpaqueKeys,
-	BuildStorage, DigestItem, Perbill,
+	BoundedVec, BuildStorage, DigestItem, Perbill,
 };
 use sp_staking::{EraIndex, SessionIndex};
 
@@ -155,7 +155,9 @@ impl onchain::Config for OnChainSeqPhragmen {
 	type Solver = SequentialPhragmen<u64, Perbill>;
 	type DataProvider = Staking;
 	type WeightInfo = ();
-	type MaxWinners = ConstU32<100>;
+	type MaxWinnersPerPage = ConstU32<100>;
+	type MaxBackersPerWinner = ConstU32<100>;
+	type Sort = ConstBool<true>;
 	type Bounds = ElectionsBoundsOnChain;
 }
 
@@ -222,6 +224,7 @@ pub fn new_test_ext(vec: Vec<(u64, u64)>) -> sp_io::TestExternalities {
 }
 
 pub fn new_test_ext_raw_authorities(authorities: AuthorityList) -> sp_io::TestExternalities {
+	sp_tracing::try_init_simple();
 	let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
 
 	let balances: Vec<_> = (0..authorities.len()).map(|i| (i as u64, 10_000_000)).collect();
@@ -259,7 +262,7 @@ pub fn new_test_ext_raw_authorities(authorities: AuthorityList) -> sp_io::TestEx
 		validator_count: 8,
 		force_era: pallet_staking::Forcing::ForceNew,
 		minimum_validator_count: 0,
-		invulnerables: vec![],
+		invulnerables: BoundedVec::new(),
 		..Default::default()
 	};
 
@@ -288,8 +291,9 @@ pub fn start_session(session_index: SessionIndex) {
 		Timestamp::set_timestamp(System::block_number() * 6000);
 
 		System::on_initialize(System::block_number());
-		Session::on_initialize(System::block_number());
+		// staking has to be initialized before session as per the multi-block staking PR.
 		Staking::on_initialize(System::block_number());
+		Session::on_initialize(System::block_number());
 		Grandpa::on_initialize(System::block_number());
 	}
 
diff --git a/substrate/frame/nomination-pools/benchmarking/src/mock.rs b/substrate/frame/nomination-pools/benchmarking/src/mock.rs
index 39ff6fb7a09..2e73ad7cf4f 100644
--- a/substrate/frame/nomination-pools/benchmarking/src/mock.rs
+++ b/substrate/frame/nomination-pools/benchmarking/src/mock.rs
@@ -85,7 +85,7 @@ impl pallet_staking::Config for Runtime {
 	type AdminOrigin = frame_system::EnsureRoot<Self::AccountId>;
 	type EraPayout = pallet_staking::ConvertCurve<RewardCurve>;
 	type ElectionProvider =
-		frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>;
+		frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, (), ())>;
 	type GenesisElectionProvider = Self::ElectionProvider;
 	type VoterList = VoterList;
 	type TargetList = pallet_staking::UseValidatorsMap<Self>;
diff --git a/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs
index 77a57e5a815..84d23a994e6 100644
--- a/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs
+++ b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs
@@ -102,7 +102,7 @@ impl pallet_staking::Config for Runtime {
 	type BondingDuration = BondingDuration;
 	type EraPayout = pallet_staking::ConvertCurve<RewardCurve>;
 	type ElectionProvider =
-		frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>;
+		frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, (), ())>;
 	type GenesisElectionProvider = Self::ElectionProvider;
 	type VoterList = VoterList;
 	type TargetList = pallet_staking::UseValidatorsMap<Self>;
diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs
index 3c81f2a664e..46a4e18c5e8 100644
--- a/substrate/frame/offences/benchmarking/src/mock.rs
+++ b/substrate/frame/offences/benchmarking/src/mock.rs
@@ -111,6 +111,7 @@ pallet_staking_reward_curve::build! {
 parameter_types! {
 	pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS;
 	pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build();
+	pub const Sort: bool = true;
 }
 
 pub struct OnChainSeqPhragmen;
@@ -119,7 +120,9 @@ impl onchain::Config for OnChainSeqPhragmen {
 	type Solver = SequentialPhragmen<AccountId, Perbill>;
 	type DataProvider = Staking;
 	type WeightInfo = ();
-	type MaxWinners = ConstU32<100>;
+	type MaxWinnersPerPage = ConstU32<100>;
+	type MaxBackersPerWinner = ConstU32<100>;
+	type Sort = Sort;
 	type Bounds = ElectionsBounds;
 }
 
diff --git a/substrate/frame/root-offences/src/mock.rs b/substrate/frame/root-offences/src/mock.rs
index 9b319cabb09..2303221c881 100644
--- a/substrate/frame/root-offences/src/mock.rs
+++ b/substrate/frame/root-offences/src/mock.rs
@@ -26,8 +26,10 @@ use frame_election_provider_support::{
 use frame_support::{
 	derive_impl, parameter_types,
 	traits::{ConstU32, ConstU64, OneSessionHandler},
+	BoundedVec,
 };
 use pallet_staking::StakerStatus;
+use sp_core::ConstBool;
 use sp_runtime::{curve::PiecewiseLinear, testing::UintAuthorityId, traits::Zero, BuildStorage};
 use sp_staking::{EraIndex, SessionIndex};
 
@@ -110,7 +112,9 @@ impl onchain::Config for OnChainSeqPhragmen {
 	type Solver = SequentialPhragmen<AccountId, Perbill>;
 	type DataProvider = Staking;
 	type WeightInfo = ();
-	type MaxWinners = ConstU32<100>;
+	type MaxWinnersPerPage = ConstU32<100>;
+	type MaxBackersPerWinner = ConstU32<100>;
+	type Sort = ConstBool<true>;
 	type Bounds = ElectionsBounds;
 }
 
@@ -180,7 +184,7 @@ impl Config for Test {
 pub struct ExtBuilder {
 	validator_count: u32,
 	minimum_validator_count: u32,
-	invulnerables: Vec<AccountId>,
+	invulnerables: BoundedVec<AccountId, <Test as pallet_staking::Config>::MaxInvulnerables>,
 	balance_factor: Balance,
 }
 
@@ -189,7 +193,7 @@ impl Default for ExtBuilder {
 		Self {
 			validator_count: 2,
 			minimum_validator_count: 0,
-			invulnerables: vec![],
+			invulnerables: BoundedVec::new(),
 			balance_factor: 1,
 		}
 	}
diff --git a/substrate/frame/session/benchmarking/src/inner.rs b/substrate/frame/session/benchmarking/src/inner.rs
index 9789b6bb593..8fda2bb4655 100644
--- a/substrate/frame/session/benchmarking/src/inner.rs
+++ b/substrate/frame/session/benchmarking/src/inner.rs
@@ -58,6 +58,7 @@ mod benchmarks {
 			false,
 			true,
 			RewardDestination::Staked,
+			pallet_staking::CurrentEra::<T>::get().unwrap_or_default(),
 		)?;
 		let v_controller = pallet_staking::Pallet::<T>::bonded(&v_stash).ok_or("not stash")?;
 
@@ -82,6 +83,7 @@ mod benchmarks {
 			false,
 			true,
 			RewardDestination::Staked,
+			pallet_staking::CurrentEra::<T>::get().unwrap_or_default(),
 		)?;
 		let v_controller = pallet_staking::Pallet::<T>::bonded(&v_stash).ok_or("not stash")?;
 		let keys = T::Keys::decode(&mut TrailingZeroInput::zeroes()).unwrap();
diff --git a/substrate/frame/session/benchmarking/src/mock.rs b/substrate/frame/session/benchmarking/src/mock.rs
index 74201da3d2f..b0681f5aa00 100644
--- a/substrate/frame/session/benchmarking/src/mock.rs
+++ b/substrate/frame/session/benchmarking/src/mock.rs
@@ -119,6 +119,7 @@ pallet_staking_reward_curve::build! {
 parameter_types! {
 	pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS;
 	pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build();
+	pub const Sort: bool = true;
 }
 
 pub struct OnChainSeqPhragmen;
@@ -127,7 +128,9 @@ impl onchain::Config for OnChainSeqPhragmen {
 	type Solver = SequentialPhragmen<AccountId, sp_runtime::Perbill>;
 	type DataProvider = Staking;
 	type WeightInfo = ();
-	type MaxWinners = ConstU32<100>;
+	type MaxWinnersPerPage = ConstU32<100>;
+	type MaxBackersPerWinner = ConstU32<100>;
+	type Sort = Sort;
 	type Bounds = ElectionsBounds;
 }
 
diff --git a/substrate/frame/session/src/lib.rs b/substrate/frame/session/src/lib.rs
index e8b4a355f49..98ce774e428 100644
--- a/substrate/frame/session/src/lib.rs
+++ b/substrate/frame/session/src/lib.rs
@@ -639,13 +639,12 @@ impl<T: Config> Pallet<T> {
 	/// punishment after a fork.
 	pub fn rotate_session() {
 		let session_index = CurrentIndex::<T>::get();
-		log::trace!(target: "runtime::session", "rotating session {:?}", session_index);
-
 		let changed = QueuedChanged::<T>::get();
 
 		// Inform the session handlers that a session is going to end.
 		T::SessionHandler::on_before_session_ending();
 		T::SessionManager::end_session(session_index);
+		log::trace!(target: "runtime::session", "ending_session {:?}", session_index);
 
 		// Get queued session keys and validators.
 		let session_keys = QueuedKeys::<T>::get();
@@ -661,11 +660,17 @@ impl<T: Config> Pallet<T> {
 		// Increment session index.
 		let session_index = session_index + 1;
 		CurrentIndex::<T>::put(session_index);
-
 		T::SessionManager::start_session(session_index);
+		log::trace!(target: "runtime::session", "starting_session {:?}", session_index);
 
 		// Get next validator set.
 		let maybe_next_validators = T::SessionManager::new_session(session_index + 1);
+		log::trace!(
+			target: "runtime::session",
+			"planning_session {:?} with {:?} validators",
+			session_index + 1,
+			maybe_next_validators.as_ref().map(|v| v.len())
+		);
 		let (next_validators, next_identities_changed) =
 			if let Some(validators) = maybe_next_validators {
 				// NOTE: as per the documentation on `OnSessionEnding`, we consider
diff --git a/substrate/frame/staking/Cargo.toml b/substrate/frame/staking/Cargo.toml
index 74b1c78e9cb..ee69c29af16 100644
--- a/substrate/frame/staking/Cargo.toml
+++ b/substrate/frame/staking/Cargo.toml
@@ -27,16 +27,18 @@ pallet-authorship = { workspace = true }
 pallet-session = { features = [
 	"historical",
 ], workspace = true }
+rand = { features = ["alloc"], workspace = true }
+rand_chacha = { workspace = true }
 scale-info = { features = ["derive", "serde"], workspace = true }
 serde = { features = ["alloc", "derive"], workspace = true }
 sp-application-crypto = { features = ["serde"], workspace = true }
+sp-core = { workspace = true }
 sp-io = { workspace = true }
 sp-runtime = { features = ["serde"], workspace = true }
 sp-staking = { features = ["serde"], workspace = true }
 
 # Optional imports for benchmarking
 frame-benchmarking = { optional = true, workspace = true }
-rand_chacha = { optional = true, workspace = true }
 
 [dev-dependencies]
 frame-benchmarking = { workspace = true, default-features = true }
@@ -47,7 +49,6 @@ pallet-balances = { workspace = true, default-features = true }
 pallet-staking-reward-curve = { workspace = true, default-features = true }
 pallet-timestamp = { workspace = true, default-features = true }
 rand_chacha = { workspace = true, default-features = true }
-sp-core = { workspace = true, default-features = true }
 sp-npos-elections = { workspace = true, default-features = true }
 sp-tracing = { workspace = true, default-features = true }
 substrate-test-utils = { workspace = true }
@@ -66,10 +67,12 @@ std = [
 	"pallet-balances/std",
 	"pallet-session/std",
 	"pallet-timestamp/std",
+	"rand/std",
+	"rand_chacha/std",
 	"scale-info/std",
 	"serde/std",
 	"sp-application-crypto/std",
 	"sp-core/std",
 	"sp-io/std",
 	"sp-npos-elections/std",
 	"sp-runtime/std",
@@ -84,7 +88,6 @@ runtime-benchmarks = [
 	"pallet-bags-list/runtime-benchmarks",
 	"pallet-balances/runtime-benchmarks",
 	"pallet-timestamp/runtime-benchmarks",
-	"rand_chacha",
 	"sp-runtime/runtime-benchmarks",
 	"sp-staking/runtime-benchmarks",
 ]
diff --git a/substrate/frame/staking/src/benchmarking.rs b/substrate/frame/staking/src/benchmarking.rs
index 59d272168d6..0d084629d66 100644
--- a/substrate/frame/staking/src/benchmarking.rs
+++ b/substrate/frame/staking/src/benchmarking.rs
@@ -19,32 +19,32 @@
 
 use super::*;
 use crate::{asset, ConfigOp, Pallet as Staking};
-use testing_utils::*;
-
 use codec::Decode;
+pub use frame_benchmarking::{
+	impl_benchmark_test_suite, v2::*, whitelist_account, whitelisted_caller, BenchmarkError,
+};
 use frame_election_provider_support::{bounds::DataProviderBounds, SortedListProvider};
 use frame_support::{
 	pallet_prelude::*,
 	storage::bounded_vec::BoundedVec,
-	traits::{Get, Imbalance, UnfilteredDispatchable},
+	traits::{Get, Imbalance},
 };
+use frame_system::RawOrigin;
 use sp_runtime::{
 	traits::{Bounded, One, StaticLookup, TrailingZeroInput, Zero},
 	Perbill, Percent, Saturating,
 };
 use sp_staking::{currency_to_vote::CurrencyToVote, SessionIndex};
-
-pub use frame_benchmarking::{
-	impl_benchmark_test_suite, v2::*, whitelist_account, whitelisted_caller, BenchmarkError,
-};
-use frame_system::RawOrigin;
+use testing_utils::*;
 
 const SEED: u32 = 0;
 const MAX_SPANS: u32 = 100;
 const MAX_SLASHES: u32 = 1000;
 
-type MaxValidators<T> = <<T as Config>::BenchmarkingConfig as BenchmarkingConfig>::MaxValidators;
-type MaxNominators<T> = <<T as Config>::BenchmarkingConfig as BenchmarkingConfig>::MaxNominators;
+type BenchMaxValidators<T> =
+	<<T as Config>::BenchmarkingConfig as BenchmarkingConfig>::MaxValidators;
+type BenchMaxNominators<T> =
+	<<T as Config>::BenchmarkingConfig as BenchmarkingConfig>::MaxNominators;
 
 // Add slashing spans to a user account. Not relevant for actual use, only to benchmark
 // read and write operations.
@@ -73,6 +73,7 @@ pub fn create_validator_with_nominators<T: Config>(
 	dead_controller: bool,
 	unique_controller: bool,
 	destination: RewardDestination<T::AccountId>,
+	era: u32,
 ) -> Result<(T::AccountId, Vec<(T::AccountId, T::AccountId)>), &'static str> {
 	// Clean up any existing state.
 	clear_validators_and_nominators::<T>();
@@ -113,9 +114,16 @@ pub fn create_validator_with_nominators<T: Config>(
 	}
 
 	ValidatorCount::<T>::put(1);
+	MinimumValidatorCount::<T>::put(1);
+
+	// Start a new (genesis) Era
+	// populate electable stashes as it gets read within `try_plan_new_era`
 
-	// Start a new Era
-	let new_validators = Staking::<T>::try_trigger_new_era(SessionIndex::one(), true).unwrap();
+	// ElectableStashes::<T>::put(
+	// 	BoundedBTreeSet::try_from(vec![v_stash.clone()].into_iter().collect::<BTreeSet<_>>())
+	// 		.unwrap(),
+	// );
+	let new_validators = Staking::<T>::try_plan_new_era(SessionIndex::one(), true).unwrap();
 
 	assert_eq!(new_validators.len(), 1);
 	assert_eq!(new_validators[0], v_stash, "Our validator was not selected!");
@@ -128,14 +136,13 @@ pub fn create_validator_with_nominators<T: Config>(
 		individual: points_individual.into_iter().collect(),
 	};
 
-	let current_era = CurrentEra::<T>::get().unwrap();
-	ErasRewardPoints::<T>::insert(current_era, reward);
+	ErasRewardPoints::<T>::insert(era, reward);
 
 	// Create reward pool
 	let total_payout = asset::existential_deposit::<T>()
 		.saturating_mul(upper_bound.into())
 		.saturating_mul(1000u32.into());
-	<ErasValidatorReward<T>>::insert(current_era, total_payout);
+	<ErasValidatorReward<T>>::insert(era, total_payout);
 
 	Ok((v_stash, nominators))
 }
@@ -223,6 +230,123 @@ const USER_SEED: u32 = 999666;
 mod benchmarks {
 	use super::*;
 
+	#[benchmark]
+	fn on_initialize_noop() {
+		assert!(ElectableStashes::<T>::get().is_empty());
+		assert_eq!(NextElectionPage::<T>::get(), None);
+
+		#[block]
+		{
+			Pallet::<T>::on_initialize(1_u32.into());
+		}
+
+		assert!(ElectableStashes::<T>::get().is_empty());
+		assert_eq!(NextElectionPage::<T>::get(), None);
+	}
+
+	#[benchmark]
+	fn do_elect_paged_inner(
+		v: Linear<1, { T::MaxValidatorSet::get() }>,
+	) -> Result<(), BenchmarkError> {
+		// TODO: re-benchmark this
+		// use frame_election_provider_support::{
+		// 	BoundedSupport, BoundedSupportsOf, ElectionProvider,
+		// };
+		// let mut bounded_random_supports = BoundedSupportsOf::<T::ElectionProvider>::default();
+		// for i in 0..v {
+		// 	let backed = account("validator", i, SEED);
+		// 	let mut total = 0;
+		// 	let voters = (0..<T::ElectionProvider as ElectionProvider>::MaxBackersPerWinner::get())
+		// 		.map(|j| {
+		// 			let voter = account("nominator", j, SEED);
+		// 			let support = 100000;
+		// 			total += support;
+		// 			(voter, support)
+		// 		})
+		// 		.collect::<Vec<_>>()
+		// 		.try_into()
+		// 		.unwrap();
+		// 	bounded_random_supports
+		// 		.try_push((backed, BoundedSupport { total, voters }))
+		// 		.map_err(|_| "bound failed")
+		// 		.expect("map is over the correct bound");
+		// }
+
+		#[block]
+		{
+			// assert_eq!(Pallet::<T>::do_elect_paged_inner(bounded_random_supports), Ok(v as
+			// usize));
+		}
+
+		// assert!(!ElectableStashes::<T>::get().is_empty());
+
+		Ok(())
+	}
+
+	#[benchmark]
+	fn get_npos_voters(
+		// number of validator intention. we will iterate all of them.
+		v: Linear<{ BenchMaxValidators::<T>::get() / 2 }, { BenchMaxValidators::<T>::get() }>,
+
+		// number of nominator intention. we will iterate all of them.
+		n: Linear<{ BenchMaxNominators::<T>::get() / 2 }, { BenchMaxNominators::<T>::get() }>,
+	) -> Result<(), BenchmarkError> {
+		create_validators_with_nominators_for_era::<T>(
+			v,
+			n,
+			MaxNominationsOf::<T>::get() as usize,
+			false,
+			None,
+		)?;
+
+		assert_eq!(Validators::<T>::count(), v);
+		assert_eq!(Nominators::<T>::count(), n);
+
+		let num_voters = (v + n) as usize;
+
+		// default bounds are unbounded.
+		let voters;
+		#[block]
+		{
+			voters = <Staking<T>>::get_npos_voters(
+				DataProviderBounds::default(),
+				&SnapshotStatus::<T::AccountId>::Waiting,
+			);
+		}
+
+		assert_eq!(voters.len(), num_voters);
+
+		Ok(())
+	}
+
+	#[benchmark]
+	fn get_npos_targets(
+		// number of validator intention.
+		v: Linear<{ BenchMaxValidators::<T>::get() / 2 }, { BenchMaxValidators::<T>::get() }>,
+	) -> Result<(), BenchmarkError> {
+		// number of nominator intention.
+		let n = BenchMaxNominators::<T>::get();
+		create_validators_with_nominators_for_era::<T>(
+			v,
+			n,
+			MaxNominationsOf::<T>::get() as usize,
+			false,
+			None,
+		)?;
+
+		let targets;
+
+		#[block]
+		{
+			// default bounds are unbounded.
+			targets = <Staking<T>>::get_npos_targets(DataProviderBounds::default());
+		}
+
+		assert_eq!(targets.len() as u32, v);
+
+		Ok(())
+	}
+
 	#[benchmark]
 	fn bond() {
 		let stash = create_funded_user::<T>("stash", USER_SEED, 100);
@@ -569,7 +693,7 @@ mod benchmarks {
 
 	#[benchmark]
 	fn set_validator_count() {
-		let validator_count = MaxValidators::<T>::get();
+		let validator_count = BenchMaxValidators::<T>::get();
 
 		#[extrinsic_call]
 		_(RawOrigin::Root, validator_count);
@@ -603,7 +727,7 @@ mod benchmarks {
 
 	#[benchmark]
 	// Worst case scenario, the list of invulnerables is very long.
-	fn set_invulnerables(v: Linear<0, { MaxValidators::<T>::get() }>) {
+	fn set_invulnerables(v: Linear<0, { T::MaxInvulnerables::get() }>) {
 		let mut invulnerables = Vec::new();
 		for i in 0..v {
 			invulnerables.push(account("invulnerable", i, SEED));
@@ -699,15 +823,20 @@ mod benchmarks {
 	fn payout_stakers_alive_staked(
 		n: Linear<0, { T::MaxExposurePageSize::get() as u32 }>,
 	) -> Result<(), BenchmarkError> {
+		// reset genesis era 0 so that triggering the new genesis era works as expected.
+		CurrentEra::<T>::set(Some(0));
+		let current_era = CurrentEra::<T>::get().unwrap();
+		Staking::<T>::clear_era_information(current_era);
+
 		let (validator, nominators) = create_validator_with_nominators::<T>(
 			n,
 			T::MaxExposurePageSize::get() as u32,
 			false,
 			true,
 			RewardDestination::Staked,
+			current_era,
 		)?;
 
-		let current_era = CurrentEra::<T>::get().unwrap();
 		// set the commission for this particular era as well.
 		<ErasValidatorPrefs<T>>::insert(
 			current_era,
@@ -822,91 +951,6 @@ mod benchmarks {
 		Ok(())
 	}
 
-	#[benchmark]
-	fn new_era(v: Linear<1, 10>, n: Linear<0, 100>) -> Result<(), BenchmarkError> {
-		create_validators_with_nominators_for_era::<T>(
-			v,
-			n,
-			MaxNominationsOf::<T>::get() as usize,
-			false,
-			None,
-		)?;
-		let session_index = SessionIndex::one();
-
-		let validators;
-		#[block]
-		{
-			validators =
-				Staking::<T>::try_trigger_new_era(session_index, true).ok_or("`new_era` failed")?;
-		}
-
-		assert!(validators.len() == v as usize);
-
-		Ok(())
-	}
-
-	#[benchmark(extra)]
-	fn payout_all(v: Linear<1, 10>, n: Linear<0, 100>) -> Result<(), BenchmarkError> {
-		create_validators_with_nominators_for_era::<T>(
-			v,
-			n,
-			MaxNominationsOf::<T>::get() as usize,
-			false,
-			None,
-		)?;
-		// Start a new Era
-		let new_validators = Staking::<T>::try_trigger_new_era(SessionIndex::one(), true).unwrap();
-		assert!(new_validators.len() == v as usize);
-
-		let current_era = CurrentEra::<T>::get().unwrap();
-		let mut points_total = 0;
-		let mut points_individual = Vec::new();
-		let mut payout_calls_arg = Vec::new();
-
-		for validator in new_validators.iter() {
-			points_total += 10;
-			points_individual.push((validator.clone(), 10));
-			payout_calls_arg.push((validator.clone(), current_era));
-		}
-
-		// Give Era Points
-		let reward = EraRewardPoints::<T::AccountId> {
-			total: points_total,
-			individual: points_individual.into_iter().collect(),
-		};
-
-		ErasRewardPoints::<T>::insert(current_era, reward);
-
-		// Create reward pool
-		let total_payout = asset::existential_deposit::<T>() * 1000u32.into();
-		<ErasValidatorReward<T>>::insert(current_era, total_payout);
-
-		let caller: T::AccountId = whitelisted_caller();
-		let origin = RawOrigin::Signed(caller);
-		let calls: Vec<_> = payout_calls_arg
-			.iter()
-			.map(|arg| {
-				Call::<T>::payout_stakers_by_page {
-					validator_stash: arg.0.clone(),
-					era: arg.1,
-					page: 0,
-				}
-				.encode()
-			})
-			.collect();
-
-		#[block]
-		{
-			for call in calls {
-				<Call<T> as Decode>::decode(&mut &*call)
-					.expect("call is encoded above, encoding must be correct")
-					.dispatch_bypass_filter(origin.clone().into())?;
-			}
-		}
-
-		Ok(())
-	}
-
 	#[benchmark(extra)]
 	fn do_slash(
 		l: Linear<1, { T::MaxUnlockingChunks::get() as u32 }>,
@@ -939,67 +983,6 @@ mod benchmarks {
 		Ok(())
 	}
 
-	#[benchmark]
-	fn get_npos_voters(
-		// number of validator intention. we will iterate all of them.
-		v: Linear<{ MaxValidators::<T>::get() / 2 }, { MaxValidators::<T>::get() }>,
-
-		// number of nominator intention. we will iterate all of them.
-		n: Linear<{ MaxNominators::<T>::get() / 2 }, { MaxNominators::<T>::get() }>,
-	) -> Result<(), BenchmarkError> {
-		create_validators_with_nominators_for_era::<T>(
-			v,
-			n,
-			MaxNominationsOf::<T>::get() as usize,
-			false,
-			None,
-		)?;
-
-		assert_eq!(Validators::<T>::count(), v);
-		assert_eq!(Nominators::<T>::count(), n);
-
-		let num_voters = (v + n) as usize;
-
-		// default bounds are unbounded.
-		let voters;
-		#[block]
-		{
-			voters = <Staking<T>>::get_npos_voters(DataProviderBounds::default());
-		}
-
-		assert_eq!(voters.len(), num_voters);
-
-		Ok(())
-	}
-
-	#[benchmark]
-	fn get_npos_targets(
-		// number of validator intention.
-		v: Linear<{ MaxValidators::<T>::get() / 2 }, { MaxValidators::<T>::get() }>,
-	) -> Result<(), BenchmarkError> {
-		// number of nominator intention.
-		let n = MaxNominators::<T>::get();
-		create_validators_with_nominators_for_era::<T>(
-			v,
-			n,
-			MaxNominationsOf::<T>::get() as usize,
-			false,
-			None,
-		)?;
-
-		let targets;
-
-		#[block]
-		{
-			// default bounds are unbounded.
-			targets = <Staking<T>>::get_npos_targets(DataProviderBounds::default());
-		}
-
-		assert_eq!(targets.len() as u32, v);
-
-		Ok(())
-	}
-
 	#[benchmark]
 	fn set_staking_configs_all_set() {
 		#[extrinsic_call]
@@ -1199,19 +1182,19 @@ mod tests {
 		ExtBuilder::default().build_and_execute(|| {
 			let n = 10;
 
+			let current_era = CurrentEra::<Test>::get().unwrap();
 			let (validator_stash, nominators) = create_validator_with_nominators::<Test>(
 				n,
 				<<Test as Config>::MaxExposurePageSize as Get<_>>::get(),
 				false,
 				false,
 				RewardDestination::Staked,
+				current_era,
 			)
 			.unwrap();
 
 			assert_eq!(nominators.len() as u32, n);
 
-			let current_era = CurrentEra::<Test>::get().unwrap();
-
 			let original_stakeable_balance = asset::stakeable_balance::<Test>(&validator_stash);
 			assert_ok!(Staking::payout_stakers_by_page(
 				RuntimeOrigin::signed(1337),
@@ -1237,6 +1220,7 @@ mod tests {
 				false,
 				false,
 				RewardDestination::Staked,
+				CurrentEra::<Test>::get().unwrap(),
 			)
 			.unwrap();
 
diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs
index 42230cb27b7..f97b4ed30b8 100644
--- a/substrate/frame/staking/src/lib.rs
+++ b/substrate/frame/staking/src/lib.rs
@@ -143,6 +143,40 @@
 //! The pallet implement the trait `SessionManager`. Which is the only API to query new validator
 //! set and allowing these validator set to be rewarded once their era is ended.
 //!
+//! ## Multi-page election support
+//!
+//! > Unless explicitly stated to the contrary, one page is the equivalent of one block. "Pages" and
+//! "blocks" are used interchangeably across the documentation.
+//!
+//! The pallet supports a multi-page election. In a multi-page election, some key actions of the
+//! staking pallet progress over multiple pages/blocks. Most notably:
+//! 1. **Snapshot creation**: The voter snapshot *may be* created over multiple blocks. The
+//!    [`frame_election_provider_support::ElectionDataProvider`] trait supports that functionality
+//!    by parameterizing the electing voters by the page index. Even though the target snapshot
+//!    could be paged, this pallet implements a single-page target snapshot only.
+//! 2. **Election**: The election is multi-block, where a set of supports is fetched per page/block.
+//!    This pallet keeps track of the elected stashes and their exposures as the paged election is
+//!    called. The [`frame_election_provider_support::ElectionProvider`] trait supports this
+//!    functionality by parameterizing the elect call with the page index.
+//!
+//! Note: the [`frame_election_provider_support::ElectionDataProvider`] trait supports a multi-paged
+//! target snapshot. However, this pallet only supports and implements a single-page snapshot.
+//! Calling `ElectionDataProvider::electable_targets` with an index other than 0 is redundant
+//! and the single page idx 0 of targets will be returned.
+//!
+//! ### Prepare an election ahead of time with `on_initialize`
+//!
+//! This pallet is expected to have a set of winners ready and their exposures collected and stored
+//! at the time of a predicted election. In order to ensure that, it starts to fetch the paged
+//! results of an election from the [`frame_election_provider_support::ElectionProvider`] `N` pages
+//! ahead of the next election prediction.
+//!
+//! As the pages of winners are fetched, their exposures and era info are processed and stored so
+//! that all the data is ready at the time of the next election.
+//!
+//! Even though this pallet supports multi-page elections, it can also be used in a single-page
+//! context provided that the configs are set accordingly.
+//!
 //! ## Interface
 //!
 //! ### Dispatchable Functions
@@ -294,6 +328,8 @@ pub mod testing_utils;
 pub(crate) mod mock;
 #[cfg(test)]
 mod tests;
+#[cfg(test)]
+mod tests_paged_election;
 
 pub mod asset;
 pub mod election_size_tracker;
@@ -309,6 +345,7 @@ extern crate alloc;
 
 use alloc::{collections::btree_map::BTreeMap, vec, vec::Vec};
 use codec::{Decode, Encode, HasCompact, MaxEncodedLen};
+use frame_election_provider_support::ElectionProvider;
 use frame_support::{
 	defensive, defensive_assert,
 	traits::{
@@ -348,9 +385,22 @@ macro_rules! log {
 	};
 }
 
-/// Maximum number of winners (aka. active validators), as defined in the election provider of this
-/// pallet.
-pub type MaxWinnersOf<T> = <<T as Config>::ElectionProvider as frame_election_provider_support::ElectionProviderBase>::MaxWinners;
+/// Alias for a bounded set of exposures behind a validator, parameterized by this pallet's
+/// election provider.
+pub type BoundedExposuresOf<T> = BoundedVec<
+	(
+		<T as frame_system::Config>::AccountId,
+		Exposure<<T as frame_system::Config>::AccountId, BalanceOf<T>>,
+	),
+	MaxWinnersPerPageOf<<T as Config>::ElectionProvider>,
+>;
+
+/// Alias for the maximum number of winners (aka. active validators), as defined in by this pallet's
+/// config.
+pub type MaxWinnersOf<T> = <T as Config>::MaxValidatorSet;
+
+/// Alias for the maximum number of winners per page, as expected by the election provider.
+pub type MaxWinnersPerPageOf<P> = <P as ElectionProvider>::MaxWinnersPerPage;
 
 /// Maximum number of nominations per nominator.
 pub type MaxNominationsOf<T> =
@@ -438,6 +488,18 @@ pub struct UnlockChunk<Balance: HasCompact + MaxEncodedLen> {
 	era: EraIndex,
 }
 
+/// Status of a paged snapshot progress.
+#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen, Default)]
+pub enum SnapshotStatus<AccountId> {
+	/// Paged snapshot is in progress, the `AccountId` was the last staker iterated in the list.
+	Ongoing(AccountId),
+	/// All the stakers in the system have been consumed since the snapshot started.
+	Consumed,
+	/// Waiting for a new snapshot to be requested.
+	#[default]
+	Waiting,
+}
+
 /// The ledger of a (bonded) stash.
 ///
 /// Note: All the reads and mutations to the [`Ledger`], [`Bonded`] and [`Payee`] storage items
@@ -1047,44 +1109,12 @@ where
 pub struct EraInfo<T>(core::marker::PhantomData<T>);
 impl<T: Config> EraInfo<T> {
 	/// Returns true if validator has one or more page of era rewards not claimed yet.
-	// Also looks at legacy storage that can be cleaned up after #433.
 	pub fn pending_rewards(era: EraIndex, validator: &T::AccountId) -> bool {
-		let page_count = if let Some(overview) = <ErasStakersOverview<T>>::get(&era, validator) {
-			overview.page_count
-		} else {
-			if <ErasStakers<T>>::contains_key(era, validator) {
-				// this means non paged exposure, and we treat them as single paged.
-				1
-			} else {
-				// if no exposure, then no rewards to claim.
-				return false
-			}
-		};
-
-		// check if era is marked claimed in legacy storage.
-		if <Ledger<T>>::get(validator)
-			.map(|l| l.legacy_claimed_rewards.contains(&era))
-			.unwrap_or_default()
-		{
-			return false
-		}
-
-		ClaimedRewards::<T>::get(era, validator).len() < page_count as usize
-	}
-
-	/// Temporary function which looks at both (1) passed param `T::StakingLedger` for legacy
-	/// non-paged rewards, and (2) `T::ClaimedRewards` for paged rewards. This function can be
-	/// removed once `T::HistoryDepth` eras have passed and none of the older non-paged rewards
-	/// are relevant/claimable.
-	// Refer tracker issue for cleanup: https://github.com/paritytech/polkadot-sdk/issues/433
-	pub(crate) fn is_rewards_claimed_with_legacy_fallback(
-		era: EraIndex,
-		ledger: &StakingLedger<T>,
-		validator: &T::AccountId,
-		page: Page,
-	) -> bool {
-		ledger.legacy_claimed_rewards.binary_search(&era).is_ok() ||
-			Self::is_rewards_claimed(era, validator, page)
+		<ErasStakersOverview<T>>::get(&era, validator)
+			.map(|overview| {
+				ClaimedRewards::<T>::get(era, validator).len() < overview.page_count as usize
+			})
+			.unwrap_or(false)
 	}
 
 	/// Check if the rewards for the given era and page index have been claimed.
@@ -1105,20 +1135,7 @@ impl<T: Config> EraInfo<T> {
 		validator: &T::AccountId,
 		page: Page,
 	) -> Option<PagedExposure<T::AccountId, BalanceOf<T>>> {
-		let overview = <ErasStakersOverview<T>>::get(&era, validator);
-
-		// return clipped exposure if page zero and paged exposure does not exist
-		// exists for backward compatibility and can be removed as part of #13034
-		if overview.is_none() && page == 0 {
-			return Some(PagedExposure::from_clipped(<ErasStakersClipped<T>>::get(era, validator)))
-		}
-
-		// no exposure for this validator
-		if overview.is_none() {
-			return None
-		}
-
-		let overview = overview.expect("checked above; qed");
+		let overview = <ErasStakersOverview<T>>::get(&era, validator)?;
 
 		// validator stake is added only in page zero
 		let validator_stake = if page == 0 { overview.own } else { Zero::zero() };
@@ -1139,13 +1156,9 @@ impl<T: Config> EraInfo<T> {
 		era: EraIndex,
 		validator: &T::AccountId,
 	) -> Exposure<T::AccountId, BalanceOf<T>> {
-		let overview = <ErasStakersOverview<T>>::get(&era, validator);
-
-		if overview.is_none() {
-			return ErasStakers::<T>::get(era, validator)
-		}
-
-		let overview = overview.expect("checked above; qed");
+		let Some(overview) = <ErasStakersOverview<T>>::get(&era, validator) else {
+			return Exposure::default();
+		};
 
 		let mut others = Vec::with_capacity(overview.nominator_count as usize);
 		for page in 0..overview.page_count {
@@ -1176,20 +1189,7 @@ impl<T: Config> EraInfo<T> {
 	}
 
 	/// Returns the next page that can be claimed or `None` if nothing to claim.
-	pub(crate) fn get_next_claimable_page(
-		era: EraIndex,
-		validator: &T::AccountId,
-		ledger: &StakingLedger<T>,
-	) -> Option<Page> {
-		if Self::is_non_paged_exposure(era, validator) {
-			return match ledger.legacy_claimed_rewards.binary_search(&era) {
-				// already claimed
-				Ok(_) => None,
-				// Non-paged exposure is considered as a single page
-				Err(_) => Some(0),
-			}
-		}
-
+	pub(crate) fn get_next_claimable_page(era: EraIndex, validator: &T::AccountId) -> Option<Page> {
 		// Find next claimable page of paged exposure.
 		let page_count = Self::get_page_count(era, validator);
 		let all_claimable_pages: Vec<Page> = (0..page_count).collect();
@@ -1198,11 +1198,6 @@ impl<T: Config> EraInfo<T> {
 		all_claimable_pages.into_iter().find(|p| !claimed_pages.contains(p))
 	}
 
-	/// Checks if exposure is paged or not.
-	fn is_non_paged_exposure(era: EraIndex, validator: &T::AccountId) -> bool {
-		<ErasStakersClipped<T>>::contains_key(&era, validator)
-	}
-
 	/// Returns validator commission for this era and page.
 	pub(crate) fn get_validator_commission(
 		era: EraIndex,
@@ -1229,39 +1224,105 @@ impl<T: Config> EraInfo<T> {
 	}
 
 	/// Store exposure for elected validators at start of an era.
-	pub fn set_exposure(
+	///
+	/// If the exposure does not exist yet for the tuple (era, validator), it sets it. Otherwise,
+	/// it updates the existing record by ensuring *intermediate* exposure pages are filled up with
+	/// `T::MaxExposurePageSize` number of backers per page and the remaining exposures are added
+	/// to new exposure pages.
+	pub fn upsert_exposure(
 		era: EraIndex,
 		validator: &T::AccountId,
-		exposure: Exposure<T::AccountId, BalanceOf<T>>,
+		mut exposure: Exposure<T::AccountId, BalanceOf<T>>,
 	) {
 		let page_size = T::MaxExposurePageSize::get().defensive_max(1);
 
-		let nominator_count = exposure.others.len();
-		// expected page count is the number of nominators divided by the page size, rounded up.
-		let expected_page_count = nominator_count
-			.defensive_saturating_add((page_size as usize).defensive_saturating_sub(1))
-			.saturating_div(page_size as usize);
-
-		let (exposure_metadata, exposure_pages) = exposure.into_pages(page_size);
-		defensive_assert!(exposure_pages.len() == expected_page_count, "unexpected page count");
+		if let Some(stored_overview) = ErasStakersOverview::<T>::get(era, &validator) {
+			let last_page_idx = stored_overview.page_count.saturating_sub(1);
+
+			let mut last_page =
+				ErasStakersPaged::<T>::get((era, validator, last_page_idx)).unwrap_or_default();
+			let last_page_empty_slots =
+				T::MaxExposurePageSize::get().saturating_sub(last_page.others.len() as u32);
+
+			// splits the exposure so that `exposures_append` will fit within the last exposure
+			// page, up to the max exposure page size. The remaining individual exposures in
+			// `exposure` will be added to new pages.
+			let exposures_append = exposure.split_others(last_page_empty_slots);
+
+			ErasStakersOverview::<T>::mutate(era, &validator, |stored| {
+				// new metadata is updated based on 3 different set of exposures: the
+				// current one, the exposure split to be "fitted" into the current last page and
+				// the exposure set that will be appended from the new page onwards.
+				let new_metadata =
+					stored.defensive_unwrap_or_default().update_with::<T::MaxExposurePageSize>(
+						[&exposures_append, &exposure]
+							.iter()
+							.fold(Default::default(), |total, expo| {
+								total.saturating_add(expo.total.saturating_sub(expo.own))
+							}),
+						[&exposures_append, &exposure]
+							.iter()
+							.fold(Default::default(), |count, expo| {
+								count.saturating_add(expo.others.len() as u32)
+							}),
+					);
+				*stored = new_metadata.into();
+			});
 
-		<ErasStakersOverview<T>>::insert(era, &validator, &exposure_metadata);
-		exposure_pages.iter().enumerate().for_each(|(page, paged_exposure)| {
-			<ErasStakersPaged<T>>::insert((era, &validator, page as Page), &paged_exposure);
-		});
+			// fill up last page with exposures.
+			last_page.page_total = last_page
+				.page_total
+				.saturating_add(exposures_append.total)
+				.saturating_sub(exposures_append.own);
+			last_page.others.extend(exposures_append.others);
+			ErasStakersPaged::<T>::insert((era, &validator, last_page_idx), last_page);
+
+			// now handle the remaining exposures and append the exposure pages. The metadata update
+			// has been already handled above.
+			let (_, exposure_pages) = exposure.into_pages(page_size);
+
+			exposure_pages.iter().enumerate().for_each(|(idx, paged_exposure)| {
+				let append_at =
+					(last_page_idx.saturating_add(1).saturating_add(idx as u32)) as Page;
+				<ErasStakersPaged<T>>::insert((era, &validator, append_at), &paged_exposure);
+			});
+		} else {
+			// expected page count is the number of nominators divided by the page size, rounded up.
+			let expected_page_count = exposure
+				.others
+				.len()
+				.defensive_saturating_add((page_size as usize).defensive_saturating_sub(1))
+				.saturating_div(page_size as usize);
+
+			// no exposures yet for this (era, validator) tuple, calculate paged exposure pages and
+			// metadata from a blank slate.
+			let (exposure_metadata, exposure_pages) = exposure.into_pages(page_size);
+			defensive_assert!(exposure_pages.len() == expected_page_count, "unexpected page count");
+
+			// insert metadata.
+			ErasStakersOverview::<T>::insert(era, &validator, exposure_metadata);
+
+			// insert validator's overview.
+			exposure_pages.iter().enumerate().for_each(|(idx, paged_exposure)| {
+				let append_at = idx as Page;
+				<ErasStakersPaged<T>>::insert((era, &validator, append_at), &paged_exposure);
+			});
+		};
 	}
 
-	/// Store total exposure for all the elected validators in the era.
-	pub(crate) fn set_total_stake(era: EraIndex, total_stake: BalanceOf<T>) {
-		<ErasTotalStake<T>>::insert(era, total_stake);
+	/// Update the total exposure for all the elected validators in the era.
+	pub(crate) fn add_total_stake(era: EraIndex, stake: BalanceOf<T>) {
+		<ErasTotalStake<T>>::mutate(era, |total_stake| {
+			*total_stake += stake;
+		});
 	}
 }
 
 /// Configurations of the benchmarking of the pallet.
 pub trait BenchmarkingConfig {
-	/// The maximum number of validators to use.
+	/// The maximum number of validators to use for snapshot creation.
 	type MaxValidators: Get<u32>;
-	/// The maximum number of nominators to use.
+	/// The maximum number of nominators to use for snapshot creation, per page.
 	type MaxNominators: Get<u32>;
 }
 
diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs
index 9dfa93c70b3..08667dd6176 100644
--- a/substrate/frame/staking/src/migrations.rs
+++ b/substrate/frame/staking/src/migrations.rs
@@ -64,8 +64,39 @@ type StorageVersion<T: Config> = StorageValue<Pallet<T>, ObsoleteReleases, Value
 /// severity for re-enabling purposes.
 pub mod v16 {
 	use super::*;
+	use frame_support::Twox64Concat;
 	use sp_staking::offence::OffenceSeverity;
 
+	#[frame_support::storage_alias]
+	pub(crate) type Invulnerables<T: Config> =
+		StorageValue<Pallet<T>, Vec<<T as frame_system::Config>::AccountId>, ValueQuery>;
+
+	#[frame_support::storage_alias]
+	pub(crate) type DisabledValidators<T: Config> =
+		StorageValue<Pallet<T>, Vec<(u32, OffenceSeverity)>, ValueQuery>;
+
+	#[frame_support::storage_alias]
+	pub(crate) type ErasStakers<T: Config> = StorageDoubleMap<
+		Pallet<T>,
+		Twox64Concat,
+		EraIndex,
+		Twox64Concat,
+		<T as frame_system::Config>::AccountId,
+		Exposure<<T as frame_system::Config>::AccountId, BalanceOf<T>>,
+		ValueQuery,
+	>;
+
+	#[frame_support::storage_alias]
+	pub(crate) type ErasStakersClipped<T: Config> = StorageDoubleMap<
+		Pallet<T>,
+		Twox64Concat,
+		EraIndex,
+		Twox64Concat,
+		<T as frame_system::Config>::AccountId,
+		Exposure<<T as frame_system::Config>::AccountId, BalanceOf<T>>,
+		ValueQuery,
+	>;
+
 	pub struct VersionUncheckedMigrateV15ToV16<T>(core::marker::PhantomData<T>);
 	impl<T: Config> UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV15ToV16<T> {
 		#[cfg(feature = "try-runtime")]
@@ -86,7 +117,7 @@ pub mod v16 {
 				.map(|v| (v, max_offence))
 				.collect::<Vec<_>>();
 
-			DisabledValidators::<T>::set(migrated);
+			v16::DisabledValidators::<T>::set(migrated);
 
 			log!(info, "v16 applied successfully.");
 			T::DbWeight::get().reads_writes(1, 1)
diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs
index 41fb3a31d52..fdf14976a7d 100644
--- a/substrate/frame/staking/src/mock.rs
+++ b/substrate/frame/staking/src/mock.rs
@@ -20,7 +20,8 @@
 use crate::{self as pallet_staking, *};
 use frame_election_provider_support::{
 	bounds::{ElectionBounds, ElectionBoundsBuilder},
-	onchain, SequentialPhragmen, VoteWeight,
+	onchain, BoundedSupports, BoundedSupportsOf, ElectionProvider, PageIndex, SequentialPhragmen,
+	Support, VoteWeight,
 };
 use frame_support::{
 	assert_ok, derive_impl, ord_parameter_types, parameter_types,
@@ -30,15 +31,17 @@ use frame_support::{
 	weights::constants::RocksDbWeight,
 };
 use frame_system::{EnsureRoot, EnsureSignedBy};
+use sp_core::ConstBool;
 use sp_io;
 use sp_runtime::{curve::PiecewiseLinear, testing::UintAuthorityId, traits::Zero, BuildStorage};
 use sp_staking::{
 	offence::{OffenceDetails, OnOffenceHandler},
-	OnStakingUpdate,
+	OnStakingUpdate, StakingInterface,
 };
 
-pub const INIT_TIMESTAMP: u64 = 30_000;
-pub const BLOCK_TIME: u64 = 1000;
+pub(crate) const INIT_TIMESTAMP: u64 = 30_000;
+pub(crate) const BLOCK_TIME: u64 = 1000;
+pub(crate) const SINGLE_PAGE: u32 = 0;
 
 /// The AccountId alias in this test module.
 pub(crate) type AccountId = u64;
@@ -203,9 +206,10 @@ parameter_types! {
 	pub static MaxExposurePageSize: u32 = 64;
 	pub static MaxUnlockingChunks: u32 = 32;
 	pub static RewardOnUnbalanceWasCalled: bool = false;
-	pub static MaxWinners: u32 = 100;
+	pub static MaxValidatorSet: u32 = 100;
 	pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build();
 	pub static AbsoluteMaxNominations: u32 = 16;
+	pub static MaxWinnersPerPage: u32 = MaxValidatorSet::get();
 }
 
 type VoterBagsListInstance = pallet_bags_list::Instance1;
@@ -218,14 +222,87 @@ impl pallet_bags_list::Config<VoterBagsListInstance> for Test {
 	type Score = VoteWeight;
 }
 
+// multi-page types and controller.
+parameter_types! {
+	// default is single page EP.
+	pub static Pages: PageIndex = 1;
+	// Should be large enough to pass all tests, but not too big to cause benchmarking tests to be too slow.
+	pub static MaxBackersPerWinner: u32 = 256;
+	// If set, the `SingleOrMultipageElectionProvider` will return these exact values, per page
+	// index. If not, it will behave as per the code.
+	pub static CustomElectionSupports: Option<Vec<Result<BoundedSupportsOf<<Test as Config>::ElectionProvider>, onchain::Error>>> = None;
+}
+
+// An election provider wrapper that allows testing with single and multi page modes.
+pub struct SingleOrMultipageElectionProvider<SP: ElectionProvider>(core::marker::PhantomData<SP>);
+impl<
+		// single page EP.
+		SP: ElectionProvider<
+			AccountId = AccountId,
+			MaxWinnersPerPage = MaxWinnersPerPage,
+			MaxBackersPerWinner = MaxBackersPerWinner,
+			Error = onchain::Error,
+		>,
+	> ElectionProvider for SingleOrMultipageElectionProvider<SP>
+{
+	type AccountId = AccountId;
+	type BlockNumber = BlockNumber;
+	type MaxWinnersPerPage = MaxWinnersPerPage;
+	type MaxBackersPerWinner = MaxBackersPerWinner;
+	type Pages = Pages;
+	type DataProvider = Staking;
+	type Error = onchain::Error;
+
+	fn elect(page: PageIndex) -> Result<BoundedSupportsOf<Self>, Self::Error> {
+		if let Some(maybe_paged_supports) = CustomElectionSupports::get() {
+			maybe_paged_supports[page as usize].clone()
+		} else {
+			if Pages::get() == 1 {
+				SP::elect(page)
+			} else {
+				// will take the first `MaxWinnersPerPage` in the validator set as winners. in this mock
+				// impl, we return an arbitrary but deterministic nominator exposure per
+				// winner/page.
+				let supports: Vec<(AccountId, Support<AccountId>)> =
+					Validators::<Test>::iter_keys()
+						.filter(|x| Staking::status(x) == Ok(StakerStatus::Validator))
+						.take(Self::MaxWinnersPerPage::get() as usize)
+						.map(|v| {
+							(
+								v,
+								Support {
+									total: (100 + page).into(),
+									voters: vec![((page + 1) as AccountId, (100 + page).into())],
+								},
+							)
+						})
+						.collect::<Vec<_>>();
+
+				Ok(to_bounded_supports(supports))
+			}
+		}
+	}
+	fn msp() -> PageIndex {
+		SP::msp()
+	}
+	fn lsp() -> PageIndex {
+		SP::lsp()
+	}
+	fn ongoing() -> bool {
+		SP::ongoing()
+	}
+}
+
 pub struct OnChainSeqPhragmen;
 impl onchain::Config for OnChainSeqPhragmen {
 	type System = Test;
 	type Solver = SequentialPhragmen<AccountId, Perbill>;
 	type DataProvider = Staking;
 	type WeightInfo = ();
-	type MaxWinners = MaxWinners;
 	type Bounds = ElectionsBounds;
+	type Sort = ConstBool<true>;
+	type MaxBackersPerWinner = MaxBackersPerWinner;
+	type MaxWinnersPerPage = MaxWinnersPerPage;
 }
 
 pub struct MockReward {}
@@ -275,9 +352,10 @@ impl crate::pallet::pallet::Config for Test {
 	type EraPayout = ConvertCurve<RewardCurve>;
 	type NextNewSession = Session;
 	type MaxExposurePageSize = MaxExposurePageSize;
-	type ElectionProvider = onchain::OnChainExecution<OnChainSeqPhragmen>;
-	type GenesisElectionProvider = Self::ElectionProvider;
-	// NOTE: consider a macro and use `UseNominatorsAndValidatorsMap<Self>` as well.
+	type MaxValidatorSet = MaxValidatorSet;
+	type ElectionProvider =
+		SingleOrMultipageElectionProvider<onchain::OnChainExecution<OnChainSeqPhragmen>>;
+	type GenesisElectionProvider = onchain::OnChainExecution<OnChainSeqPhragmen>;
 	type VoterList = VoterBagsList;
 	type TargetList = UseValidatorsMap<Self>;
 	type NominationsQuota = WeightedNominationsQuota<16>;
@@ -287,6 +365,8 @@ impl crate::pallet::pallet::Config for Test {
 	type EventListeners = EventListenerMock;
 	type DisablingStrategy =
 		pallet_staking::UpToLimitWithReEnablingDisablingStrategy<DISABLING_LIMIT_FACTOR>;
+	type MaxInvulnerables = ConstU32<20>;
+	type MaxDisabledValidators = ConstU32<100>;
 }
 
 pub struct WeightedNominationsQuota<const MAX: u32>;
@@ -320,7 +400,7 @@ pub struct ExtBuilder {
 	nominate: bool,
 	validator_count: u32,
 	minimum_validator_count: u32,
-	invulnerables: Vec<AccountId>,
+	invulnerables: BoundedVec<AccountId, <Test as Config>::MaxInvulnerables>,
 	has_stakers: bool,
 	initialize_first_session: bool,
 	pub min_nominator_bond: Balance,
@@ -338,7 +418,7 @@ impl Default for ExtBuilder {
 			validator_count: 2,
 			minimum_validator_count: 0,
 			balance_factor: 1,
-			invulnerables: vec![],
+			invulnerables: BoundedVec::new(),
 			has_stakers: true,
 			initialize_first_session: true,
 			min_nominator_bond: ExistentialDeposit::get(),
@@ -372,7 +452,8 @@ impl ExtBuilder {
 		self
 	}
 	pub fn invulnerables(mut self, invulnerables: Vec<AccountId>) -> Self {
-		self.invulnerables = invulnerables;
+		self.invulnerables = BoundedVec::try_from(invulnerables)
+			.expect("Too many invulnerable validators: upper limit is MaxInvulnerables");
 		self
 	}
 	pub fn session_per_era(self, length: SessionIndex) -> Self {
@@ -421,10 +502,22 @@ impl ExtBuilder {
 		self.stakers.push((stash, ctrl, stake, status));
 		self
 	}
+	pub fn exposures_page_size(self, max: u32) -> Self {
+		MaxExposurePageSize::set(max);
+		self
+	}
 	pub fn balance_factor(mut self, factor: Balance) -> Self {
 		self.balance_factor = factor;
 		self
 	}
+	pub fn multi_page_election_provider(self, pages: PageIndex) -> Self {
+		Pages::set(pages);
+		self
+	}
+	pub fn max_winners_per_page(self, max: u32) -> Self {
+		MaxWinnersPerPage::set(max);
+		self
+	}
 	pub fn try_state(self, enable: bool) -> Self {
 		SkipTryStateCheck::set(!enable);
 		self
@@ -468,6 +561,7 @@ impl ExtBuilder {
 				(71, self.balance_factor * 2000),
 				(80, self.balance_factor),
 				(81, self.balance_factor * 2000),
+				(91, self.balance_factor * 2000),
 				// This allows us to have a total_payout different from 0.
 				(999, 1_000_000_000_000),
 			],
@@ -713,6 +807,13 @@ pub(crate) fn validator_controllers() -> Vec<AccountId> {
 		.collect()
 }
 
+pub(crate) fn era_exposures(era: u32) -> Vec<(AccountId, Exposure<AccountId, Balance>)> {
+	validator_controllers()
+		.into_iter()
+		.map(|v| (v, Staking::eras_stakers(era, &v)))
+		.collect::<Vec<_>>()
+}
+
 pub(crate) fn on_offence_in_era(
 	offenders: &[OffenceDetails<
 		AccountId,
@@ -927,3 +1028,13 @@ pub(crate) fn staking_events_since_last_call() -> Vec<crate::Event<Test>> {
 pub(crate) fn balances(who: &AccountId) -> (Balance, Balance) {
 	(asset::stakeable_balance::<Test>(who), Balances::reserved_balance(who))
 }
+
+pub(crate) fn to_bounded_supports(
+	supports: Vec<(AccountId, Support<AccountId>)>,
+) -> BoundedSupports<
+	AccountId,
+	<<Test as Config>::ElectionProvider as ElectionProvider>::MaxWinnersPerPage,
+	<<Test as Config>::ElectionProvider as ElectionProvider>::MaxBackersPerWinner,
+> {
+	supports.try_into().unwrap()
+}
diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs
index 8c3ff23315a..8ca018c7d8b 100644
--- a/substrate/frame/staking/src/pallet/impls.rs
+++ b/substrate/frame/staking/src/pallet/impls.rs
@@ -18,9 +18,8 @@
 //! Implementations for the Staking FRAME Pallet.
 
 use frame_election_provider_support::{
-	bounds::{CountBound, SizeBound},
-	data_provider, BoundedSupportsOf, DataProviderBounds, ElectionDataProvider, ElectionProvider,
-	ScoreProvider, SortedListProvider, VoteWeight, VoterOf,
+	bounds::CountBound, data_provider, BoundedSupportsOf, DataProviderBounds, ElectionDataProvider,
+	ElectionProvider, PageIndex, ScoreProvider, SortedListProvider, VoteWeight, VoterOf,
 };
 use frame_support::{
 	defensive,
@@ -50,10 +49,10 @@ use sp_staking::{
 
 use crate::{
 	asset, election_size_tracker::StaticTracker, log, slashing, weights::WeightInfo, ActiveEraInfo,
-	BalanceOf, EraInfo, EraPayout, Exposure, ExposureOf, Forcing, IndividualExposure,
-	LedgerIntegrityState, MaxNominationsOf, MaxWinnersOf, Nominations, NominationsQuota,
-	PositiveImbalanceOf, RewardDestination, SessionInterface, StakingLedger, ValidatorPrefs,
-	STAKING_ID,
+	BalanceOf, BoundedExposuresOf, EraInfo, EraPayout, Exposure, ExposureOf, Forcing,
+	IndividualExposure, LedgerIntegrityState, MaxNominationsOf, MaxWinnersOf, MaxWinnersPerPageOf,
+	Nominations, NominationsQuota, PositiveImbalanceOf, RewardDestination, SessionInterface,
+	SnapshotStatus, StakingLedger, ValidatorPrefs, STAKING_ID,
 };
 use alloc::{boxed::Box, vec, vec::Vec};
 
@@ -73,6 +72,20 @@ use sp_runtime::TryRuntimeError;
 const NPOS_MAX_ITERATIONS_COEFFICIENT: u32 = 2;
 
 impl<T: Config> Pallet<T> {
+	/// Fetches the number of pages configured by the election provider.
+	pub fn election_pages() -> u32 {
+		<<T as Config>::ElectionProvider as ElectionProvider>::Pages::get()
+	}
+
+	/// Clears up all election preparation metadata in storage.
+	pub(crate) fn clear_election_metadata() {
+		VoterSnapshotStatus::<T>::kill();
+		NextElectionPage::<T>::kill();
+		ElectableStashes::<T>::kill();
+		// TODO: crude weights, improve.
+		Self::register_weight(T::DbWeight::get().writes(3));
+	}
+
 	/// Fetches the ledger associated with a controller or stash account, if any.
 	pub fn ledger(account: StakingAccount<T::AccountId>) -> Result<StakingLedger<T>, Error<T>> {
 		StakingLedger::<T>::get(account)
@@ -231,13 +244,8 @@ impl<T: Config> Pallet<T> {
 		validator_stash: T::AccountId,
 		era: EraIndex,
 	) -> DispatchResultWithPostInfo {
-		let controller = Self::bonded(&validator_stash).ok_or_else(|| {
-			Error::<T>::NotStash.with_weight(T::WeightInfo::payout_stakers_alive_staked(0))
-		})?;
-
-		let ledger = Self::ledger(StakingAccount::Controller(controller))?;
-		let page = EraInfo::<T>::get_next_claimable_page(era, &validator_stash, &ledger)
-			.ok_or_else(|| {
+		let page =
+			EraInfo::<T>::get_next_claimable_page(era, &validator_stash).ok_or_else(|| {
 				Error::<T>::AlreadyClaimed
 					.with_weight(T::WeightInfo::payout_stakers_alive_staked(0))
 			})?;
@@ -257,6 +265,7 @@ impl<T: Config> Pallet<T> {
 		})?;
 
 		let history_depth = T::HistoryDepth::get();
+
 		ensure!(
 			era <= current_era && era >= current_era.saturating_sub(history_depth),
 			Error::<T>::InvalidEraToReward
@@ -292,13 +301,13 @@ impl<T: Config> Pallet<T> {
 
 		let stash = ledger.stash.clone();
 
-		if EraInfo::<T>::is_rewards_claimed_with_legacy_fallback(era, &ledger, &stash, page) {
+		if EraInfo::<T>::is_rewards_claimed(era, &stash, page) {
 			return Err(Error::<T>::AlreadyClaimed
 				.with_weight(T::WeightInfo::payout_stakers_alive_staked(0)))
-		} else {
-			EraInfo::<T>::set_rewards_as_claimed(era, &stash, page);
 		}
 
+		EraInfo::<T>::set_rewards_as_claimed(era, &stash, page);
+
 		let exposure = EraInfo::<T>::get_paged_exposure(era, &stash, page).ok_or_else(|| {
 			Error::<T>::InvalidEraToReward
 				.with_weight(T::WeightInfo::payout_stakers_alive_staked(0))
@@ -348,7 +357,7 @@ impl<T: Config> Pallet<T> {
 			era_index: era,
 			validator_stash: stash.clone(),
 			page,
-			next: EraInfo::<T>::get_next_claimable_page(era, &stash, &ledger),
+			next: EraInfo::<T>::get_next_claimable_page(era, &stash),
 		});
 
 		let mut total_imbalance = PositiveImbalanceOf::<T>::zero();
@@ -444,6 +453,10 @@ impl<T: Config> Pallet<T> {
 	}
 
 	/// Plan a new session potentially trigger a new era.
+	///
+	/// Subsequent function calls in the happy path are as follows:
+	/// 1. `try_plan_new_era`
+	/// 2. `plan_new_era`
 	fn new_session(
 		session_index: SessionIndex,
 		is_genesis: bool,
@@ -461,9 +474,9 @@ impl<T: Config> Pallet<T> {
 			match ForceEra::<T>::get() {
 				// Will be set to `NotForcing` again if a new era has been triggered.
 				Forcing::ForceNew => (),
-				// Short circuit to `try_trigger_new_era`.
+				// Short circuit to `try_plan_new_era`.
 				Forcing::ForceAlways => (),
-				// Only go to `try_trigger_new_era` if deadline reached.
+				// Only go to `try_plan_new_era` if deadline reached.
 				Forcing::NotForcing if era_length >= T::SessionsPerEra::get() => (),
 				_ => {
 					// Either `Forcing::ForceNone`,
@@ -473,7 +486,7 @@ impl<T: Config> Pallet<T> {
 			}
 
 			// New era.
-			let maybe_new_era_validators = Self::try_trigger_new_era(session_index, is_genesis);
+			let maybe_new_era_validators = Self::try_plan_new_era(session_index, is_genesis);
 			if maybe_new_era_validators.is_some() &&
 				matches!(ForceEra::<T>::get(), Forcing::ForceNew)
 			{
@@ -484,7 +497,7 @@ impl<T: Config> Pallet<T> {
 		} else {
 			// Set initial era.
 			log!(debug, "Starting the first era.");
-			Self::try_trigger_new_era(session_index, is_genesis)
+			Self::try_plan_new_era(session_index, is_genesis)
 		}
 	}
 
@@ -533,6 +546,7 @@ impl<T: Config> Pallet<T> {
 	fn start_era(start_session: SessionIndex) {
 		let active_era = ActiveEra::<T>::mutate(|active_era| {
 			let new_index = active_era.as_ref().map(|info| info.index + 1).unwrap_or(0);
+			log!(debug, "starting active era {:?}", new_index);
 			*active_era = Some(ActiveEraInfo {
 				index: new_index,
 				// Set new active era start in next `on_finalize`. To guarantee usage of `Time`
@@ -604,69 +618,78 @@ impl<T: Config> Pallet<T> {
 		}
 	}
 
-	/// Plan a new era.
+	/// Helper function provided to other pallets that want to rely on pallet-staking for
+	/// testing/benchmarking, and wish to populate `ElectableStashes`, such that a next call (post
+	/// genesis) to `try_plan_new_era` works.
 	///
-	/// * Bump the current era storage (which holds the latest planned era).
-	/// * Store start session index for the new planned era.
-	/// * Clean old era information.
-	/// * Store staking information for the new planned era
-	///
-	/// Returns the new validator set.
-	pub fn trigger_new_era(
-		start_session_index: SessionIndex,
-		exposures: BoundedVec<
-			(T::AccountId, Exposure<T::AccountId, BalanceOf<T>>),
-			MaxWinnersOf<T>,
-		>,
-	) -> BoundedVec<T::AccountId, MaxWinnersOf<T>> {
-		// Increment or set current era.
-		let new_planned_era = CurrentEra::<T>::mutate(|s| {
-			*s = Some(s.map(|s| s + 1).unwrap_or(0));
-			s.unwrap()
-		});
-		ErasStartSessionIndex::<T>::insert(&new_planned_era, &start_session_index);
-
-		// Clean old era information.
-		if let Some(old_era) = new_planned_era.checked_sub(T::HistoryDepth::get() + 1) {
-			Self::clear_era_information(old_era);
-		}
-
-		// Set staking information for the new era.
-		Self::store_stakers_info(exposures, new_planned_era)
+	/// This uses `GenesisElectionProvider` which should always be set to something reasonable and
+	/// instant.
+	pub fn populate_staking_election_testing_benchmarking_only() -> Result<(), &'static str> {
+		let supports = <T::GenesisElectionProvider>::elect(Zero::zero()).map_err(|e| {
+			log!(warn, "genesis election provider failed due to {:?}", e);
+			"election failed"
+		})?;
+		Self::do_elect_paged_inner(supports).map_err(|_| "do_elect_paged_inner")?;
+		Ok(())
 	}
 
 	/// Potentially plan a new era.
 	///
-	/// Get election result from `T::ElectionProvider`.
+	/// The election results are either fetched directly from an election provider if it is the
+	/// "genesis" election or from a cached set of winners.
+	///
 	/// In case election result has more than [`MinimumValidatorCount`] validator trigger a new era.
 	///
 	/// In case a new era is planned, the new validator set is returned.
-	pub(crate) fn try_trigger_new_era(
+	pub(crate) fn try_plan_new_era(
 		start_session_index: SessionIndex,
 		is_genesis: bool,
 	) -> Option<BoundedVec<T::AccountId, MaxWinnersOf<T>>> {
-		let election_result: BoundedVec<_, MaxWinnersOf<T>> = if is_genesis {
-			let result = <T::GenesisElectionProvider>::elect().map_err(|e| {
+		// TODO: weights of this call path are rather crude, improve.
+		let validators: BoundedVec<T::AccountId, MaxWinnersOf<T>> = if is_genesis {
+			// genesis election only uses one election result page.
+			let result = <T::GenesisElectionProvider>::elect(Zero::zero()).map_err(|e| {
 				log!(warn, "genesis election provider failed due to {:?}", e);
 				Self::deposit_event(Event::StakingElectionFailed);
 			});
 
-			result
-				.ok()?
+			let exposures = Self::collect_exposures(result.ok().unwrap_or_default());
+
+			let validators = exposures
+				.iter()
+				.map(|(validator, _)| validator)
+				.cloned()
+				.try_collect()
+				.unwrap_or_default();
+
+			// set stakers info for genesis era (0).
+			let _ = Self::store_stakers_info(exposures, Zero::zero());
+
+			// consume full block weight to be safe.
+			Self::register_weight(sp_runtime::traits::Bounded::max_value());
+			validators
+		} else {
+			// note: exposures have already been processed and stored for each of the election
+			// solution page at the time of `elect_paged(page_index)`.
+			Self::register_weight(T::DbWeight::get().reads(1));
+			ElectableStashes::<T>::take()
 				.into_inner()
+				.into_iter()
+				.collect::<Vec<_>>()
 				.try_into()
-				// both bounds checked in integrity test to be equal
-				.defensive_unwrap_or_default()
-		} else {
-			let result = <T::ElectionProvider>::elect().map_err(|e| {
-				log!(warn, "election provider failed due to {:?}", e);
-				Self::deposit_event(Event::StakingElectionFailed);
-			});
-			result.ok()?
+				.expect("same bounds, will fit; qed.")
 		};
 
-		let exposures = Self::collect_exposures(election_result);
-		if (exposures.len() as u32) < MinimumValidatorCount::<T>::get().max(1) {
+		log!(
+			info,
+			"(is_genesis?: {:?}) electable validators count for session starting {:?}, era {:?}: {:?}",
+			is_genesis,
+			start_session_index,
+			CurrentEra::<T>::get().unwrap_or_default() + 1,
+			validators.len()
+		);
+
+		if (validators.len() as u32) < MinimumValidatorCount::<T>::get().max(1) {
 			// Session will panic if we ever return an empty validator set, thus max(1) ^^.
 			match CurrentEra::<T>::get() {
 				Some(current_era) if current_era > 0 => log!(
@@ -674,7 +697,7 @@ impl<T: Config> Pallet<T> {
 					"chain does not have enough staking candidates to operate for era {:?} ({} \
 					elected, minimum is {})",
 					CurrentEra::<T>::get().unwrap_or(0),
-					exposures.len(),
+					validators.len(),
 					MinimumValidatorCount::<T>::get(),
 				),
 				None => {
@@ -685,69 +708,186 @@ impl<T: Config> Pallet<T> {
 					CurrentEra::<T>::put(0);
 					ErasStartSessionIndex::<T>::insert(&0, &start_session_index);
 				},
-				_ => (),
+				_ => {},
 			}
-
+			// election failed, clear election prep metadata.
 			Self::deposit_event(Event::StakingElectionFailed);
-			return None
+			Self::clear_election_metadata();
+
+			None
+		} else {
+			Self::deposit_event(Event::StakersElected);
+			Self::clear_election_metadata();
+			Self::plan_new_era(start_session_index);
+
+			Some(validators)
 		}
+	}
 
-		Self::deposit_event(Event::StakersElected);
-		Some(Self::trigger_new_era(start_session_index, exposures))
+	/// Plan a new era.
+	///
+	/// * Bump the current era storage (which holds the latest planned era).
+	/// * Store start session index for the new planned era.
+	/// * Clean old era information.
+	///
+	/// The new validator set for this era is stored under `ElectableStashes`.
+	pub fn plan_new_era(start_session_index: SessionIndex) {
+		// Increment or set current era.
+		let new_planned_era = CurrentEra::<T>::mutate(|s| {
+			*s = Some(s.map(|s| s + 1).unwrap_or(0));
+			s.unwrap()
+		});
+		ErasStartSessionIndex::<T>::insert(&new_planned_era, &start_session_index);
+
+		// Clean old era information.
+		if let Some(old_era) = new_planned_era.checked_sub(T::HistoryDepth::get() + 1) {
+			log!(trace, "Removing era information for {:?}", old_era);
+			Self::clear_era_information(old_era);
+		}
 	}
 
-	/// Process the output of the election.
+	/// Paginated elect.
 	///
-	/// Store staking information for the new planned era
+	/// Fetches the election page with index `page` from the election provider.
+	///
+	/// The results from the elect call should be stored in the `ElectableStashes` storage. In
+	/// addition, it stores stakers' information for next planned era based on the paged solution
+	/// data returned.
+	///
+	/// If any new election winner does not fit in the electable stashes storage, it truncates the
+	/// result of the election. We ensure that only the winners that are part of the electable
+	/// stashes have exposures collected for the next era.
+	///
+	/// If `T::ElectionProvider::elect(_)` fails, we don't raise an error just yet and continue until
+	/// `elect(0)`. IFF `elect(0)` is called, yet we have not collected enough validators (as per
+	/// `MinimumValidatorCount` storage), an error is raised in the next era rotation.
+	pub(crate) fn do_elect_paged(page: PageIndex) -> Weight {
+		match T::ElectionProvider::elect(page) {
+			Ok(supports) => {
+				let supports_len = supports.len() as u32;
+				let inner_processing_results = Self::do_elect_paged_inner(supports);
+				if let Err(not_included) = inner_processing_results {
+					defensive!(
+						"electable stashes exceeded limit, unexpected but election proceeds.\
+                {} stashes from election result discarded",
+						not_included
+					);
+				};
+
+				Self::deposit_event(Event::PagedElectionProceeded {
+					page,
+					result: inner_processing_results.map(|x| x as u32).map_err(|x| x as u32),
+				});
+				T::WeightInfo::do_elect_paged_inner(supports_len)
+			},
+			Err(e) => {
+				log!(warn, "election provider page failed due to {:?} (page: {})", e, page);
+				Self::deposit_event(Event::PagedElectionProceeded { page, result: Err(0) });
+				// no-op -- no need to raise an error for now.
+				Default::default()
+			},
+		}
+	}
+
+	/// Inner implementation of [`Self::do_elect_paged`].
+	///
+	/// Returns an error if adding election winners to the electable stashes storage fails due to
+	/// exceeded bounds. In case of error, it returns the index of the first stash that failed to be
+	/// included.
+	pub(crate) fn do_elect_paged_inner(
+		mut supports: BoundedSupportsOf<T::ElectionProvider>,
+	) -> Result<usize, usize> {
+		// preparing the next era. Note: we expect `do_elect_paged` to be called *only* during a
+		// non-genesis era, thus current era should be set by now.
+		let planning_era = CurrentEra::<T>::get().defensive_unwrap_or_default().saturating_add(1);
+
+		match Self::add_electables(supports.iter().map(|(s, _)| s.clone())) {
+			Ok(added) => {
+				let exposures = Self::collect_exposures(supports);
+				let _ = Self::store_stakers_info(exposures, planning_era);
+				Ok(added)
+			},
+			Err(not_included_idx) => {
+				let not_included = supports.len().saturating_sub(not_included_idx);
+
+				log!(
+					warn,
+					"not all winners fit within the electable stashes, excluding {:?} accounts from solution.",
+					not_included,
+				);
+
+				// filter out supports of stashes that do not fit within the electable stashes
+				// storage bounds to prevent collecting their exposures.
+				supports.truncate(not_included_idx);
+				let exposures = Self::collect_exposures(supports);
+				let _ = Self::store_stakers_info(exposures, planning_era);
+
+				Err(not_included)
+			},
+		}
+	}
+
+	/// Process the output of a paged election.
+	///
+	/// Store staking information for the new planned era of a single election page.
 	pub fn store_stakers_info(
-		exposures: BoundedVec<
-			(T::AccountId, Exposure<T::AccountId, BalanceOf<T>>),
-			MaxWinnersOf<T>,
-		>,
+		exposures: BoundedExposuresOf<T>,
 		new_planned_era: EraIndex,
-	) -> BoundedVec<T::AccountId, MaxWinnersOf<T>> {
-		// Populate elected stash, stakers, exposures, and the snapshot of validator prefs.
-		let mut total_stake: BalanceOf<T> = Zero::zero();
-		let mut elected_stashes = Vec::with_capacity(exposures.len());
+	) -> BoundedVec<T::AccountId, MaxWinnersPerPageOf<T::ElectionProvider>> {
+		// populate elected stash, stakers, exposures, and the snapshot of validator prefs.
+		let mut total_stake_page: BalanceOf<T> = Zero::zero();
+		let mut elected_stashes_page = Vec::with_capacity(exposures.len());
+		let mut total_backers = 0u32;
 
 		exposures.into_iter().for_each(|(stash, exposure)| {
-			// build elected stash
-			elected_stashes.push(stash.clone());
-			// accumulate total stake
-			total_stake = total_stake.saturating_add(exposure.total);
-			// store staker exposure for this era
-			EraInfo::<T>::set_exposure(new_planned_era, &stash, exposure);
+			log!(
+				trace,
+				"stored exposure for stash {:?} and {:?} backers",
+				stash,
+				exposure.others.len()
+			);
+			// build elected stash.
+			elected_stashes_page.push(stash.clone());
+			// accumulate total stake.
+			total_stake_page = total_stake_page.saturating_add(exposure.total);
+			// set or update staker exposure for this era.
+			total_backers += exposure.others.len() as u32;
+			EraInfo::<T>::upsert_exposure(new_planned_era, &stash, exposure);
 		});
 
-		let elected_stashes: BoundedVec<_, MaxWinnersOf<T>> = elected_stashes
-			.try_into()
-			.expect("elected_stashes.len() always equal to exposures.len(); qed");
+		let elected_stashes: BoundedVec<_, MaxWinnersPerPageOf<T::ElectionProvider>> =
+			elected_stashes_page
+				.try_into()
+				.expect("both types are bounded by MaxWinnersPerPageOf; qed");
 
-		EraInfo::<T>::set_total_stake(new_planned_era, total_stake);
+		// adds to total stake in this era.
+		EraInfo::<T>::add_total_stake(new_planned_era, total_stake_page);
 
-		// Collect the pref of all winners.
+		// collect or update the pref of all winners.
 		for stash in &elected_stashes {
 			let pref = Validators::<T>::get(stash);
 			<ErasValidatorPrefs<T>>::insert(&new_planned_era, stash, pref);
 		}
 
-		if new_planned_era > 0 {
-			log!(
-				info,
-				"new validator set of size {:?} has been processed for era {:?}",
-				elected_stashes.len(),
-				new_planned_era,
-			);
-		}
+		log!(
+			info,
+			"stored a page of stakers with {:?} validators and {:?} total backers for era {:?}",
+			elected_stashes.len(),
+			total_backers,
+			new_planned_era,
+		);
 
 		elected_stashes
 	}
 
 	/// Consume a set of [`BoundedSupports`] from [`sp_npos_elections`] and collect them into a
 	/// [`Exposure`].
-	fn collect_exposures(
+	///
+	/// Returns a vec with the exposures of all validators in `supports`, bounded by the number
+	/// of max winners per page returned by the election provider.
+	pub(crate) fn collect_exposures(
 		supports: BoundedSupportsOf<T::ElectionProvider>,
-	) -> BoundedVec<(T::AccountId, Exposure<T::AccountId, BalanceOf<T>>), MaxWinnersOf<T>> {
+	) -> BoundedExposuresOf<T> {
 		let total_issuance = asset::total_issuance::<T>();
 		let to_currency = |e: frame_election_provider_support::ExtendedBalance| {
 			T::CurrencyToVote::to_currency(e, total_issuance)
@@ -766,6 +906,7 @@ impl<T: Config> Pallet<T> {
 					.map(|(nominator, weight)| (nominator, to_currency(weight)))
 					.for_each(|(nominator, stake)| {
 						if nominator == validator {
+							defensive_assert!(own == Zero::zero(), "own stake should be unique");
 							own = own.saturating_add(stake);
 						} else {
 							others.push(IndividualExposure { who: nominator, value: stake });
@@ -780,6 +921,28 @@ impl<T: Config> Pallet<T> {
 			.expect("we only map through support vector which cannot change the size; qed")
 	}
 
+	/// Adds a new set of stashes to the electable stashes.
+	///
+	/// Returns:
+	///
+	/// `Ok(newly_added)` if all stashes were added successfully.
+	/// `Err(first_un_included)` if some stashes cannot be added due to bounds.
+	pub(crate) fn add_electables(
+		new_stashes: impl Iterator<Item = T::AccountId>,
+	) -> Result<usize, usize> {
+		ElectableStashes::<T>::mutate(|electable| {
+			let pre_size = electable.len();
+
+			for (idx, stash) in new_stashes.enumerate() {
+				if electable.try_insert(stash).is_err() {
+					return Err(idx);
+				}
+			}
+
+			Ok(electable.len() - pre_size)
+		})
+	}
+
 	/// Remove all associated data of a stash account from the staking system.
 	///
 	/// Assumes storage is upgraded before calling.
@@ -804,11 +967,7 @@ impl<T: Config> Pallet<T> {
 	pub(crate) fn clear_era_information(era_index: EraIndex) {
 		// FIXME: We can possibly set a reasonable limit since we do this only once per era and
 		// clean up state across multiple blocks.
-		let mut cursor = <ErasStakers<T>>::clear_prefix(era_index, u32::MAX, None);
-		debug_assert!(cursor.maybe_cursor.is_none());
-		cursor = <ErasStakersClipped<T>>::clear_prefix(era_index, u32::MAX, None);
-		debug_assert!(cursor.maybe_cursor.is_none());
-		cursor = <ErasValidatorPrefs<T>>::clear_prefix(era_index, u32::MAX, None);
+		let mut cursor = <ErasValidatorPrefs<T>>::clear_prefix(era_index, u32::MAX, None);
 		debug_assert!(cursor.maybe_cursor.is_none());
 		cursor = <ClaimedRewards<T>>::clear_prefix(era_index, u32::MAX, None);
 		debug_assert!(cursor.maybe_cursor.is_none());
@@ -873,7 +1032,7 @@ impl<T: Config> Pallet<T> {
 		stash: T::AccountId,
 		exposure: Exposure<T::AccountId, BalanceOf<T>>,
 	) {
-		EraInfo::<T>::set_exposure(current_era, &stash, exposure);
+		EraInfo::<T>::upsert_exposure(current_era, &stash, exposure);
 	}
 
 	#[cfg(feature = "runtime-benchmarks")]
@@ -881,23 +1040,29 @@ impl<T: Config> Pallet<T> {
 		SlashRewardFraction::<T>::put(fraction);
 	}
 
-	/// Get all of the voters that are eligible for the npos election.
+	/// Get all the voters associated with `page` that are eligible for the npos election.
 	///
-	/// `maybe_max_len` can imposes a cap on the number of voters returned;
+	/// `maybe_max_len` can impose a cap on the number of voters returned per page.
 	///
 	/// Sets `MinimumActiveStake` to the minimum active nominator stake in the returned set of
 	/// nominators.
 	///
+	/// Note: in the context of the multi-page snapshot, we expect the *order* of `VoterList` and
+	/// `TargetList` not to change while the pages are being processed.
+	///
 	/// This function is self-weighing as [`DispatchClass::Mandatory`].
-	pub fn get_npos_voters(bounds: DataProviderBounds) -> Vec<VoterOf<Self>> {
+	pub(crate) fn get_npos_voters(
+		bounds: DataProviderBounds,
+		status: &SnapshotStatus<T::AccountId>,
+	) -> Vec<VoterOf<Self>> {
 		let mut voters_size_tracker: StaticTracker<Self> = StaticTracker::default();
 
-		let final_predicted_len = {
+		let page_len_prediction = {
 			let all_voter_count = T::VoterList::count();
 			bounds.count.unwrap_or(all_voter_count.into()).min(all_voter_count.into()).0
 		};
 
-		let mut all_voters = Vec::<_>::with_capacity(final_predicted_len as usize);
+		let mut all_voters = Vec::<_>::with_capacity(page_len_prediction as usize);
 
 		// cache a few things.
 		let weight_of = Self::weight_of_fn();
@@ -907,9 +1072,18 @@ impl<T: Config> Pallet<T> {
 		let mut nominators_taken = 0u32;
 		let mut min_active_stake = u64::MAX;
 
-		let mut sorted_voters = T::VoterList::iter();
-		while all_voters.len() < final_predicted_len as usize &&
-			voters_seen < (NPOS_MAX_ITERATIONS_COEFFICIENT * final_predicted_len as u32)
+		let mut sorted_voters = match status {
+			// start the snapshot processing from the beginning.
+			SnapshotStatus::Waiting => T::VoterList::iter(),
+			// snapshot continues, start from the last iterated voter in the list.
+			SnapshotStatus::Ongoing(account_id) => T::VoterList::iter_from(&account_id)
+				.defensive_unwrap_or(Box::new(vec![].into_iter())),
+			// all voters have been consumed already, return an empty iterator.
+			SnapshotStatus::Consumed => Box::new(vec![].into_iter()),
+		};
+
+		while all_voters.len() < page_len_prediction as usize &&
+			voters_seen < (NPOS_MAX_ITERATIONS_COEFFICIENT * page_len_prediction as u32)
 		{
 			let voter = match sorted_voters.next() {
 				Some(voter) => {
@@ -944,6 +1118,7 @@ impl<T: Config> Pallet<T> {
 					all_voters.push(voter);
 					nominators_taken.saturating_inc();
 				} else {
+					defensive!("non-nominator fetched from voter list: {:?}", voter);
 					// technically should never happen, but not much we can do about it.
 				}
 				min_active_stake =
@@ -974,15 +1149,17 @@ impl<T: Config> Pallet<T> {
 				// `T::NominationsQuota::get_quota`. The latter can rarely happen, and is not
 				// really an emergency or bug if it does.
 				defensive!(
-				    "DEFENSIVE: invalid item in `VoterList`: {:?}, this nominator probably has too many nominations now",
+				    "invalid item in `VoterList`: {:?}, this nominator probably has too many nominations now",
                     voter,
                 );
 			}
 		}
 
 		// all_voters should have not re-allocated.
-		debug_assert!(all_voters.capacity() == final_predicted_len as usize);
+		debug_assert!(all_voters.capacity() == page_len_prediction as usize);
 
+		// TODO remove this and further instances of this, it will now be recorded in the EPM-MB
+		// pallet.
 		Self::register_weight(T::WeightInfo::get_npos_voters(validators_taken, nominators_taken));
 
 		let min_active_stake: T::CurrencyBalance =
@@ -990,18 +1167,12 @@ impl<T: Config> Pallet<T> {
 
 		MinimumActiveStake::<T>::put(min_active_stake);
 
-		log!(
-			info,
-			"generated {} npos voters, {} from validators and {} nominators",
-			all_voters.len(),
-			validators_taken,
-			nominators_taken
-		);
-
 		all_voters
 	}
 
-	/// Get the targets for an upcoming npos election.
+	/// Get all the targets that are eligible for the npos election.
+	///
+	/// The target snapshot is *always* single paged.
 	///
 	/// This function is self-weighing as [`DispatchClass::Mandatory`].
 	pub fn get_npos_targets(bounds: DataProviderBounds) -> Vec<T::AccountId> {
@@ -1029,6 +1200,7 @@ impl<T: Config> Pallet<T> {
 
 			if targets_size_tracker.try_register_target(target.clone(), &bounds).is_err() {
 				// no more space left for the election snapshot, stop iterating.
+				log!(warn, "npos targets size exceeded, stopping iteration.");
 				Self::deposit_event(Event::<T>::SnapshotTargetsSizeExceeded {
 					size: targets_size_tracker.size as u32,
 				});
@@ -1041,7 +1213,7 @@ impl<T: Config> Pallet<T> {
 		}
 
 		Self::register_weight(T::WeightInfo::get_npos_targets(all_targets.len() as u32));
-		log!(info, "generated {} npos targets", all_targets.len());
+		log!(info, "[bounds {:?}] generated {} npos targets", bounds, all_targets.len());
 
 		all_targets
 	}
@@ -1150,9 +1322,10 @@ impl<T: Config> Pallet<T> {
 
 	/// Returns full exposure of a validator for a given era.
 	///
-	/// History note: This used to be a getter for old storage item `ErasStakers` deprecated in v14.
-	/// Since this function is used in the codebase at various places, we kept it as a custom getter
-	/// that takes care of getting the full exposure of the validator in a backward compatible way.
+	/// History note: This used to be a getter for old storage item `ErasStakers` deprecated in v14
+	/// and deleted in v17. Since this function is used in the codebase at various places, we kept
+	/// it as a custom getter that takes care of getting the full exposure of the validator in a
+	/// backward compatible way.
 	pub fn eras_stakers(
 		era: EraIndex,
 		account: &T::AccountId,
@@ -1260,6 +1433,13 @@ impl<T: Config> Pallet<T> {
 	}
 }
 
+// TODO: this is a very bad design. A hack for now so we can do benchmarks. Once
+// `next_election_prediction` is reworked based on rc-client, get rid of it. For now, just know that
+// the only fn that can set this is only accessible in runtime benchmarks.
+frame_support::parameter_types! {
+	pub storage BenchmarkNextElection: Option<u32> = None;
+}
+
 impl<T: Config> ElectionDataProvider for Pallet<T> {
 	type AccountId = T::AccountId;
 	type BlockNumber = BlockNumberFor<T>;
@@ -1270,36 +1450,92 @@ impl<T: Config> ElectionDataProvider for Pallet<T> {
 		Ok(ValidatorCount::<T>::get())
 	}
 
-	fn electing_voters(bounds: DataProviderBounds) -> data_provider::Result<Vec<VoterOf<Self>>> {
-		// This can never fail -- if `maybe_max_len` is `Some(_)` we handle it.
-		let voters = Self::get_npos_voters(bounds);
+	fn electing_voters(
+		bounds: DataProviderBounds,
+		page: PageIndex,
+	) -> data_provider::Result<Vec<VoterOf<Self>>> {
+		let mut status = VoterSnapshotStatus::<T>::get();
+		let voters = Self::get_npos_voters(bounds, &status);
+
+		// update the voter snapshot status.
+		match (page, &status) {
+			// last page, reset status for next round.
+			(0, _) => status = SnapshotStatus::Waiting,
+
+			(_, SnapshotStatus::Waiting) | (_, SnapshotStatus::Ongoing(_)) => {
+				let maybe_last = voters.last().map(|(x, _, _)| x).cloned();
+
+				if let Some(ref last) = maybe_last {
+					if maybe_last == T::VoterList::iter().last() {
+						// all voters in the voter list have been consumed.
+						status = SnapshotStatus::Consumed;
+					} else {
+						status = SnapshotStatus::Ongoing(last.clone());
+					}
+				}
+			},
+			// do nothing.
+			(_, SnapshotStatus::Consumed) => (),
+		}
+		log!(
+			info,
+			"[page {}, status {:?} (stake?: {:?}), bounds {:?}] generated {} npos voters",
+			page,
+			VoterSnapshotStatus::<T>::get(),
+			if let SnapshotStatus::Ongoing(x) = VoterSnapshotStatus::<T>::get() {
+				Self::weight_of(&x)
+			} else {
+				Zero::zero()
+			},
+			bounds,
+			voters.len(),
+		);
+		VoterSnapshotStatus::<T>::put(status);
+
+		debug_assert!(!bounds.slice_exhausted(&voters));
 
-		debug_assert!(!bounds.exhausted(
-			SizeBound(voters.encoded_size() as u32).into(),
-			CountBound(voters.len() as u32).into()
-		));
+		Ok(voters)
+	}
 
+	fn electing_voters_stateless(
+		bounds: DataProviderBounds,
+	) -> data_provider::Result<Vec<VoterOf<Self>>> {
+		let voters = Self::get_npos_voters(bounds, &SnapshotStatus::Waiting);
+		log!(
+			info,
+			"[stateless, status {:?}, bounds {:?}] generated {} npos voters",
+			VoterSnapshotStatus::<T>::get(),
+			bounds,
+			voters.len(),
+		);
 		Ok(voters)
 	}
 
-	fn electable_targets(bounds: DataProviderBounds) -> data_provider::Result<Vec<T::AccountId>> {
-		let targets = Self::get_npos_targets(bounds);
+	fn electable_targets(
+		bounds: DataProviderBounds,
+		page: PageIndex,
+	) -> data_provider::Result<Vec<T::AccountId>> {
+		if page > 0 {
+			log!(warn, "multi-page target snapshot not supported, returning page 0.");
+		}
 
+		let targets = Self::get_npos_targets(bounds);
 		// We can't handle this case yet -- return an error. WIP to improve handling this case in
 		// <https://github.com/paritytech/substrate/pull/13195>.
-		if bounds.exhausted(None, CountBound(T::TargetList::count() as u32).into()) {
+		if bounds.exhausted(None, CountBound(targets.len() as u32).into()) {
 			return Err("Target snapshot too big")
 		}
 
-		debug_assert!(!bounds.exhausted(
-			SizeBound(targets.encoded_size() as u32).into(),
-			CountBound(targets.len() as u32).into()
-		));
+		debug_assert!(!bounds.slice_exhausted(&targets));
 
 		Ok(targets)
 	}
 
 	fn next_election_prediction(now: BlockNumberFor<T>) -> BlockNumberFor<T> {
+		if let Some(override_value) = BenchmarkNextElection::get() {
+			return override_value.into()
+		}
+
 		let current_era = CurrentEra::<T>::get().unwrap_or(0);
 		let current_session = CurrentPlannedSession::<T>::get();
 		let current_era_start_session_index =
@@ -1327,11 +1563,33 @@ impl<T: Config> ElectionDataProvider for Pallet<T> {
 				.into(),
 		};
 
+		// TODO: this is somewhat temp hack to fix this issue:
+		// in the new multi-block staking model, we finish the election one block before the session
+		// ends. In this very last block, we don't want to tell EP that the next election is in one
+		// block, but rather in a whole era from now. For simplification, while we are
+		// mid-election, we always point to one era later.
+		//
+		// This whole code path has to change when we move to the rc-client model.
+		if !ElectableStashes::<T>::get().is_empty() {
+			log!(debug, "we are mid-election, pointing to next era as election prediction.");
+			return now.saturating_add(
+				BlockNumberFor::<T>::from(T::SessionsPerEra::get()) * session_length,
+			)
+		}
+
 		now.saturating_add(
 			until_this_session_end.saturating_add(sessions_left.saturating_mul(session_length)),
 		)
 	}
 
+	#[cfg(feature = "runtime-benchmarks")]
+	fn set_next_election(to: u32) {
+		frame_benchmarking::benchmarking::add_to_whitelist(
+			BenchmarkNextElection::key().to_vec().into(),
+		);
+		BenchmarkNextElection::set(&Some(to));
+	}
+
 	#[cfg(feature = "runtime-benchmarks")]
 	fn add_voter(
 		voter: T::AccountId,
@@ -1349,7 +1607,7 @@ impl<T: Config> ElectionDataProvider for Pallet<T> {
 
 	#[cfg(feature = "runtime-benchmarks")]
 	fn add_target(target: T::AccountId) {
-		let stake = MinValidatorBond::<T>::get() * 100u32.into();
+		let stake = (MinValidatorBond::<T>::get() + 1u32.into()) * 100u32.into();
 		<Bonded<T>>::insert(target.clone(), target.clone());
 		<Ledger<T>>::insert(target.clone(), StakingLedger::<T>::new(target.clone(), stake));
 		Self::do_add_validator(
@@ -1402,6 +1660,11 @@ impl<T: Config> ElectionDataProvider for Pallet<T> {
 			);
 		});
 	}
+
+	#[cfg(feature = "runtime-benchmarks")]
+	fn set_desired_targets(count: u32) {
+		ValidatorCount::<T>::put(count);
+	}
 }
 
 /// In this implementation `new_session(session)` must be called before `end_session(session-1)`
@@ -1410,6 +1673,15 @@ impl<T: Config> ElectionDataProvider for Pallet<T> {
 /// Once the first new_session is planned, all session must start and then end in order, though
 /// some session can lag in between the newest session planned and the latest session started.
 impl<T: Config> pallet_session::SessionManager<T::AccountId> for Pallet<T> {
+	// └── Self::new_session(new_index, false)
+	//	└── Self::try_plan_new_era(session_index, is_genesis)
+	//    └── T::GenesisElectionProvider::elect() OR ElectableStashes::<T>::take()
+	//    └── Self::collect_exposures()
+	//    └── Self::store_stakers_info()
+	//    └── Self::plan_new_era()
+	//        └── CurrentEra increment
+	//        └── ErasStartSessionIndex update
+	//        └── Self::clear_era_information()
 	fn new_session(new_index: SessionIndex) -> Option<Vec<T::AccountId>> {
 		log!(trace, "planning new session {}", new_index);
 		CurrentPlannedSession::<T>::put(new_index);
@@ -1420,6 +1692,19 @@ impl<T: Config> pallet_session::SessionManager<T::AccountId> for Pallet<T> {
 		CurrentPlannedSession::<T>::put(new_index);
 		Self::new_session(new_index, true).map(|v| v.into_inner())
 	}
+	// start_session(start_session: SessionIndex)
+	//	└── Check if this is the start of next active era
+	//	└── Self::start_era(start_session)
+	//		└── Update active era index
+	//		└── Set active era start timestamp
+	//		└── Update BondedEras
+	//		└── Self::apply_unapplied_slashes()
+	//			└── Get slashes for era from UnappliedSlashes
+	//			└── Apply each slash
+	//			└── Clear slashes metadata
+	//	└── Process disabled validators
+	//	└── Get all disabled validators
+	//	└── Call T::SessionInterface::disable_validator() for each
 	fn start_session(start_index: SessionIndex) {
 		log!(trace, "starting session {}", start_index);
 		Self::start_session(start_index)
@@ -1909,7 +2194,7 @@ impl<T: Config> StakingInterface for Pallet<T> {
 	}
 
 	fn election_ongoing() -> bool {
-		T::ElectionProvider::ongoing()
+		<T::ElectionProvider as ElectionProvider>::ongoing()
 	}
 
 	fn force_unstake(who: Self::AccountId) -> sp_runtime::DispatchResult {
@@ -1919,13 +2204,6 @@ impl<T: Config> StakingInterface for Pallet<T> {
 	}
 
 	fn is_exposed_in_era(who: &Self::AccountId, era: &EraIndex) -> bool {
-		// look in the non paged exposures
-		// FIXME: Can be cleaned up once non paged exposures are cleared (https://github.com/paritytech/polkadot-sdk/issues/433)
-		ErasStakers::<T>::iter_prefix(era).any(|(validator, exposures)| {
-			validator == *who || exposures.others.iter().any(|i| i.who == *who)
-		})
-			||
-		// look in the paged exposures
 		ErasStakersPaged::<T>::iter_prefix((era,)).any(|((validator, _), exposure_page)| {
 			validator == *who || exposure_page.others.iter().any(|i| i.who == *who)
 		})
@@ -1982,7 +2260,7 @@ impl<T: Config> StakingInterface for Pallet<T> {
 				.map(|(who, value)| IndividualExposure { who: who.clone(), value: *value })
 				.collect::<Vec<_>>();
 			let exposure = Exposure { total: Default::default(), own: Default::default(), others };
-			EraInfo::<T>::set_exposure(*current_era, stash, exposure);
+			EraInfo::<T>::upsert_exposure(*current_era, stash, exposure);
 		}
 
 		fn set_current_era(era: EraIndex) {
@@ -2041,23 +2319,55 @@ impl<T: Config> sp_staking::StakingUnchecked for Pallet<T> {
 
 #[cfg(any(test, feature = "try-runtime"))]
 impl<T: Config> Pallet<T> {
-	pub(crate) fn do_try_state(_: BlockNumberFor<T>) -> Result<(), TryRuntimeError> {
+	pub(crate) fn do_try_state(now: BlockNumberFor<T>) -> Result<(), TryRuntimeError> {
 		ensure!(
 			T::VoterList::iter()
 				.all(|x| <Nominators<T>>::contains_key(&x) || <Validators<T>>::contains_key(&x)),
 			"VoterList contains non-staker"
 		);
 
+		Self::ensure_snapshot_metadata_state(now)?;
 		Self::check_ledgers()?;
 		Self::check_bonded_consistency()?;
 		Self::check_payees()?;
 		Self::check_nominators()?;
-		Self::check_exposures()?;
 		Self::check_paged_exposures()?;
 		Self::check_count()?;
 		Self::ensure_disabled_validators_sorted()
 	}
 
+	/// Test invariants of:
+	///
+	/// - `NextElectionPage`: should only be set if pages > 1 and if we are within `pages-election
+	///   -> election`
+	/// - `VoterSnapshotStatus`: cannot be argued about as we don't know when we get a call to data
+	///   provider, but we know it should never be set if we have 1 page.
+	///
+	/// -- SHOULD ONLY BE CALLED AT THE END OF A GIVEN BLOCK.
+	pub fn ensure_snapshot_metadata_state(now: BlockNumberFor<T>) -> Result<(), TryRuntimeError> {
+		let next_election = Self::next_election_prediction(now);
+		let pages = Self::election_pages().saturated_into::<BlockNumberFor<T>>();
+		let election_prep_start = next_election - pages;
+
+		if pages > One::one() && now >= election_prep_start {
+			ensure!(
+				NextElectionPage::<T>::get().is_some() || next_election == now + One::one(),
+				"NextElectionPage should be set mid election, except for last block"
+			);
+		} else if pages == One::one() {
+			ensure!(
+				NextElectionPage::<T>::get().is_none(),
+				"NextElectionPage should not be set mid election"
+			);
+			ensure!(
+				VoterSnapshotStatus::<T>::get() == SnapshotStatus::Waiting,
+				"VoterSnapshotStatus should not be set mid election"
+			);
+		}
+
+		Ok(())
+	}
+
 	/// Invariants:
 	/// * A controller should not be associated with more than one ledger.
 	/// * A bonded (stash, controller) pair should have only one associated ledger. I.e. if the
@@ -2149,11 +2459,13 @@ impl<T: Config> Pallet<T> {
 			<T as Config>::TargetList::count() == Validators::<T>::count(),
 			"wrong external count"
 		);
+		let max_validators_bound = MaxWinnersOf::<T>::get();
+		let max_winners_per_page_bound = MaxWinnersPerPageOf::<T::ElectionProvider>::get();
 		ensure!(
-			ValidatorCount::<T>::get() <=
-				<T::ElectionProvider as frame_election_provider_support::ElectionProviderBase>::MaxWinners::get(),
-			Error::<T>::TooManyValidators
+			max_validators_bound >= max_winners_per_page_bound,
+			"max validators should be higher than per page bounds"
 		);
+		ensure!(ValidatorCount::<T>::get() <= max_validators_bound, Error::<T>::TooManyValidators);
 		Ok(())
 	}
 
@@ -2210,27 +2522,6 @@ impl<T: Config> Pallet<T> {
 		Ok(())
 	}
 
-	/// Invariants:
-	/// * For each era exposed validator, check if the exposure total is sane (exposure.total  =
-	/// exposure.own + exposure.own).
-	fn check_exposures() -> Result<(), TryRuntimeError> {
-		let era = ActiveEra::<T>::get().unwrap().index;
-		ErasStakers::<T>::iter_prefix_values(era)
-			.map(|expo| {
-				ensure!(
-					expo.total ==
-						expo.own +
-							expo.others
-								.iter()
-								.map(|e| e.value)
-								.fold(Zero::zero(), |acc, x| acc + x),
-					"wrong total exposure.",
-				);
-				Ok(())
-			})
-			.collect::<Result<(), TryRuntimeError>>()
-	}
-
 	/// Invariants:
 	/// * For each paged era exposed validator, check if the exposure total is sane (exposure.total
 	/// = exposure.own + exposure.own).
diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs
index 9d891462739..7d22df148de 100644
--- a/substrate/frame/staking/src/pallet/mod.rs
+++ b/substrate/frame/staking/src/pallet/mod.rs
@@ -17,28 +17,33 @@
 
 //! Staking FRAME Pallet.
 
-use alloc::vec::Vec;
+use alloc::{format, vec::Vec};
 use codec::Codec;
-use frame_election_provider_support::{
-	ElectionProvider, ElectionProviderBase, SortedListProvider, VoteWeight,
-};
+use frame_election_provider_support::{ElectionProvider, SortedListProvider, VoteWeight};
 use frame_support::{
+	assert_ok,
 	pallet_prelude::*,
 	traits::{
 		fungible::{
 			hold::{Balanced as FunHoldBalanced, Mutate as FunHoldMutate},
-			Mutate as FunMutate,
+			Inspect, Mutate, Mutate as FunMutate,
 		},
 		Defensive, DefensiveSaturating, EnsureOrigin, EstimateNextNewSession, Get,
 		InspectLockableCurrency, OnUnbalanced, UnixTime,
 	},
 	weights::Weight,
-	BoundedVec,
+	BoundedBTreeSet, BoundedVec,
 };
 use frame_system::{ensure_root, ensure_signed, pallet_prelude::*};
+use rand::seq::SliceRandom;
+use rand_chacha::{
+	rand_core::{RngCore, SeedableRng},
+	ChaChaRng,
+};
+use sp_core::{sr25519::Pair as SrPair, Pair};
 use sp_runtime::{
 	traits::{SaturatedConversion, StaticLookup, Zero},
-	ArithmeticError, Perbill, Percent,
+	ArithmeticError, Perbill, Percent, Saturating,
 };
 
 use sp_staking::{
@@ -54,10 +59,10 @@ pub use impls::*;
 
 use crate::{
 	asset, slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf,
-	DisablingStrategy, EraPayout, EraRewardPoints, Exposure, ExposurePage, Forcing,
-	LedgerIntegrityState, MaxNominationsOf, NegativeImbalanceOf, Nominations, NominationsQuota,
-	PositiveImbalanceOf, RewardDestination, SessionInterface, StakingLedger, UnappliedSlash,
-	UnlockChunk, ValidatorPrefs,
+	DisablingStrategy, EraPayout, EraRewardPoints, ExposurePage, Forcing, LedgerIntegrityState,
+	MaxNominationsOf, NegativeImbalanceOf, Nominations, NominationsQuota, PositiveImbalanceOf,
+	RewardDestination, SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk,
+	ValidatorPrefs,
 };
 
 // The speculative number of spans are used as an input of the weight annotation of
@@ -67,12 +72,11 @@ pub(crate) const SPECULATIVE_NUM_SPANS: u32 = 32;
 
 #[frame_support::pallet]
 pub mod pallet {
-	use frame_election_provider_support::ElectionDataProvider;
-
-	use crate::{BenchmarkingConfig, PagedExposureMetadata};
-
 	use super::*;
 
+	use crate::{BenchmarkingConfig, PagedExposureMetadata, SnapshotStatus};
+	use frame_election_provider_support::{ElectionDataProvider, PageIndex};
+
 	/// The in-code storage version.
 	const STORAGE_VERSION: StorageVersion = StorageVersion::new(16);
 
@@ -157,6 +161,8 @@ pub mod pallet {
 			AccountId = Self::AccountId,
 			BlockNumber = BlockNumberFor<Self>,
 			DataProvider = Pallet<Self>,
+			MaxWinnersPerPage = <Self::ElectionProvider as ElectionProvider>::MaxWinnersPerPage,
+			MaxBackersPerWinner = <Self::ElectionProvider as ElectionProvider>::MaxBackersPerWinner,
 		>;
 
 		/// Something that defines the maximum number of nominations per nominator.
@@ -166,10 +172,9 @@ pub mod pallet {
 		/// Number of eras to keep in history.
 		///
 		/// Following information is kept for eras in `[current_era -
-		/// HistoryDepth, current_era]`: `ErasStakers`, `ErasStakersClipped`,
-		/// `ErasValidatorPrefs`, `ErasValidatorReward`, `ErasRewardPoints`,
-		/// `ErasTotalStake`, `ErasStartSessionIndex`, `ClaimedRewards`, `ErasStakersPaged`,
-		/// `ErasStakersOverview`.
+		/// HistoryDepth, current_era]`: `ErasValidatorPrefs`, `ErasValidatorReward`,
+		/// `ErasRewardPoints`, `ErasTotalStake`, `ErasStartSessionIndex`, `ClaimedRewards`,
+		/// `ErasStakersPaged`, `ErasStakersOverview`.
 		///
 		/// Must be more than the number of eras delayed by session.
 		/// I.e. active era must always be in history. I.e. `active_era >
@@ -253,6 +258,13 @@ pub mod pallet {
 		#[pallet::constant]
 		type MaxExposurePageSize: Get<u32>;
 
+		/// The absolute maximum of winner validators this pallet should return.
+		///
+		/// As this pallet supports multi-block election, the set of winner validators *per
+		/// election* is bounded by this type.
+		#[pallet::constant]
+		type MaxValidatorSet: Get<u32>;
+
 		/// Something that provides a best-effort sorted list of voters aka electing nominators,
 		/// used for NPoS election.
 		///
@@ -317,6 +329,14 @@ pub mod pallet {
 		#[pallet::no_default_bounds]
 		type DisablingStrategy: DisablingStrategy<Self>;
 
+		/// Maximum number of invulnerable validators.
+		#[pallet::constant]
+		type MaxInvulnerables: Get<u32>;
+
+		/// Maximum number of disabled validators.
+		#[pallet::constant]
+		type MaxDisabledValidators: Get<u32>;
+
 		/// Some parameters of the benchmarking.
 		#[cfg(feature = "std")]
 		type BenchmarkingConfig: BenchmarkingConfig;
@@ -371,7 +391,10 @@ pub mod pallet {
 			type NextNewSession = ();
 			type MaxExposurePageSize = ConstU32<64>;
 			type MaxUnlockingChunks = ConstU32<32>;
+			type MaxValidatorSet = ConstU32<100>;
 			type MaxControllersInDeprecationBatch = ConstU32<100>;
+			type MaxInvulnerables = ConstU32<20>;
+			type MaxDisabledValidators = ConstU32<100>;
 			type EventListeners = ();
 			type DisablingStrategy = crate::UpToLimitDisablingStrategy;
 			#[cfg(feature = "std")]
@@ -392,8 +415,8 @@ pub mod pallet {
 	/// easy to initialize and the performance hit is minimal (we expect no more than four
 	/// invulnerables) and restricted to testnets.
 	#[pallet::storage]
-	#[pallet::unbounded]
-	pub type Invulnerables<T: Config> = StorageValue<_, Vec<T::AccountId>, ValueQuery>;
+	pub type Invulnerables<T: Config> =
+		StorageValue<_, BoundedVec<T::AccountId, T::MaxInvulnerables>, ValueQuery>;
 
 	/// Map from all locked "stash" accounts to the controller account.
 	///
@@ -505,26 +528,6 @@ pub mod pallet {
 	#[pallet::storage]
 	pub type ErasStartSessionIndex<T> = StorageMap<_, Twox64Concat, EraIndex, SessionIndex>;
 
-	/// Exposure of validator at era.
-	///
-	/// This is keyed first by the era index to allow bulk deletion and then the stash account.
-	///
-	/// Is it removed after [`Config::HistoryDepth`] eras.
-	/// If stakers hasn't been set or has been removed then empty exposure is returned.
-	///
-	/// Note: Deprecated since v14. Use `EraInfo` instead to work with exposures.
-	#[pallet::storage]
-	#[pallet::unbounded]
-	pub type ErasStakers<T: Config> = StorageDoubleMap<
-		_,
-		Twox64Concat,
-		EraIndex,
-		Twox64Concat,
-		T::AccountId,
-		Exposure<T::AccountId, BalanceOf<T>>,
-		ValueQuery,
-	>;
-
 	/// Summary of validator exposure at a given era.
 	///
 	/// This contains the total stake in support of the validator and their own stake. In addition,
@@ -548,34 +551,6 @@ pub mod pallet {
 		OptionQuery,
 	>;
 
-	/// Clipped Exposure of validator at era.
-	///
-	/// Note: This is deprecated, should be used as read-only and will be removed in the future.
-	/// New `Exposure`s are stored in a paged manner in `ErasStakersPaged` instead.
-	///
-	/// This is similar to [`ErasStakers`] but number of nominators exposed is reduced to the
-	/// `T::MaxExposurePageSize` biggest stakers.
-	/// (Note: the field `total` and `own` of the exposure remains unchanged).
-	/// This is used to limit the i/o cost for the nominator payout.
-	///
-	/// This is keyed fist by the era index to allow bulk deletion and then the stash account.
-	///
-	/// It is removed after [`Config::HistoryDepth`] eras.
-	/// If stakers hasn't been set or has been removed then empty exposure is returned.
-	///
-	/// Note: Deprecated since v14. Use `EraInfo` instead to work with exposures.
-	#[pallet::storage]
-	#[pallet::unbounded]
-	pub type ErasStakersClipped<T: Config> = StorageDoubleMap<
-		_,
-		Twox64Concat,
-		EraIndex,
-		Twox64Concat,
-		T::AccountId,
-		Exposure<T::AccountId, BalanceOf<T>>,
-		ValueQuery,
-	>;
-
 	/// Paginated exposure of a validator at given era.
 	///
 	/// This is keyed first by the era index to allow bulk deletion, then stash account and finally
@@ -613,7 +588,7 @@ pub mod pallet {
 		ValueQuery,
 	>;
 
-	/// Similar to `ErasStakers`, this holds the preferences of validators.
+	/// Exposure of validator at era with the preferences of validators.
 	///
 	/// This is keyed first by the era index to allow bulk deletion and then the stash account.
 	///
@@ -741,9 +716,8 @@ pub mod pallet {
 	/// Additionally, each disabled validator is associated with an `OffenceSeverity` which
 	/// represents how severe is the offence that got the validator disabled.
 	#[pallet::storage]
-	#[pallet::unbounded]
 	pub type DisabledValidators<T: Config> =
-		StorageValue<_, Vec<(u32, OffenceSeverity)>, ValueQuery>;
+		StorageValue<_, BoundedVec<(u32, OffenceSeverity), T::MaxDisabledValidators>, ValueQuery>;
 
 	/// The threshold for when users can start calling `chill_other` for other validators /
 	/// nominators. The threshold is compared to the actual number of validators / nominators
@@ -751,12 +725,34 @@ pub mod pallet {
 	#[pallet::storage]
 	pub(crate) type ChillThreshold<T: Config> = StorageValue<_, Percent, OptionQuery>;
 
+	/// Voter snapshot progress status.
+	///
+	/// If the status is `Ongoing`, it keeps a cursor of the last voter retrieved to proceed when
+	/// creating the next snapshot page.
+	#[pallet::storage]
+	pub(crate) type VoterSnapshotStatus<T: Config> =
+		StorageValue<_, SnapshotStatus<T::AccountId>, ValueQuery>;
+
+	/// Keeps track of an ongoing multi-page election solution request.
+	///
+	/// If `Some(_)`, it is the next page that we intend to elect. If `None`, we are not in the
+	/// election process.
+	///
+	/// This is only set in multi-block elections. Should always be `None` otherwise.
+	#[pallet::storage]
+	pub(crate) type NextElectionPage<T: Config> = StorageValue<_, PageIndex, OptionQuery>;
+
+	/// A bounded list of the "electable" stashes that resulted from a successful election.
+	#[pallet::storage]
+	pub(crate) type ElectableStashes<T: Config> =
+		StorageValue<_, BoundedBTreeSet<T::AccountId, T::MaxValidatorSet>, ValueQuery>;
+
 	#[pallet::genesis_config]
 	#[derive(frame_support::DefaultNoBound)]
 	pub struct GenesisConfig<T: Config> {
 		pub validator_count: u32,
 		pub minimum_validator_count: u32,
-		pub invulnerables: Vec<T::AccountId>,
+		pub invulnerables: BoundedVec<T::AccountId, T::MaxInvulnerables>,
 		pub force_era: Forcing,
 		pub slash_reward_fraction: Perbill,
 		pub canceled_payout: BalanceOf<T>,
@@ -766,6 +762,39 @@ pub mod pallet {
 		pub min_validator_bond: BalanceOf<T>,
 		pub max_validator_count: Option<u32>,
 		pub max_nominator_count: Option<u32>,
+		/// Create the given number of validators and nominators.
+		///
+		/// These accounts need not be in the endowment list of balances, and are auto-topped up
+		/// here.
+		///
+		/// Useful for testing genesis config.
+		pub dev_stakers: Option<(u32, u32)>,
+	}
+
+	impl<T: Config> GenesisConfig<T> {
+		fn generate_endowed_bonded_account(
+			derivation: &str,
+			rng: &mut ChaChaRng,
+			min_validator_bond: BalanceOf<T>,
+		) -> T::AccountId {
+			let pair: SrPair = Pair::from_string(&derivation, None)
+				.expect(&format!("Failed to parse derivation string: {derivation}"));
+			let who = T::AccountId::decode(&mut &pair.public().encode()[..])
+				.expect(&format!("Failed to decode public key from pair: {:?}", pair.public()));
+
+			let stake = BalanceOf::<T>::from(rng.next_u64())
+				.max(T::Currency::minimum_balance())
+				.max(min_validator_bond);
+			let two: BalanceOf<T> = 2u64.into();
+
+			assert_ok!(T::Currency::mint_into(&who, stake * two));
+			assert_ok!(<Pallet<T>>::bond(
+				T::RuntimeOrigin::from(Some(who.clone()).into()),
+				stake,
+				RewardDestination::Staked,
+			));
+			who
+		}
 	}
 
 	#[pallet::genesis_build]
@@ -773,7 +802,11 @@ pub mod pallet {
 		fn build(&self) {
 			ValidatorCount::<T>::put(self.validator_count);
 			MinimumValidatorCount::<T>::put(self.minimum_validator_count);
-			Invulnerables::<T>::put(&self.invulnerables);
+			assert!(
+				self.invulnerables.len() as u32 <= T::MaxInvulnerables::get(),
+				"Too many invulnerable validators at genesis."
+			);
+			<Invulnerables<T>>::put(&self.invulnerables);
 			ForceEra::<T>::put(self.force_era);
 			CanceledSlashPayout::<T>::put(self.canceled_payout);
 			SlashRewardFraction::<T>::put(self.slash_reward_fraction);
@@ -798,12 +831,12 @@ pub mod pallet {
 					asset::free_to_stake::<T>(stash) >= balance,
 					"Stash does not have enough balance to bond."
 				);
-				frame_support::assert_ok!(<Pallet<T>>::bond(
+				assert_ok!(<Pallet<T>>::bond(
 					T::RuntimeOrigin::from(Some(stash.clone()).into()),
 					balance,
 					RewardDestination::Staked,
 				));
-				frame_support::assert_ok!(match status {
+				assert_ok!(match status {
 					crate::StakerStatus::Validator => <Pallet<T>>::validate(
 						T::RuntimeOrigin::from(Some(stash.clone()).into()),
 						Default::default(),
@@ -816,7 +849,8 @@ pub mod pallet {
 				});
 				assert!(
 					ValidatorCount::<T>::get() <=
-						<T::ElectionProvider as ElectionProviderBase>::MaxWinners::get()
+						<T::ElectionProvider as ElectionProvider>::MaxWinnersPerPage::get() *
+							<T::ElectionProvider as ElectionProvider>::Pages::get()
 				);
 			}
 
@@ -826,6 +860,58 @@ pub mod pallet {
 				Nominators::<T>::count() + Validators::<T>::count(),
 				"not all genesis stakers were inserted into sorted list provider, something is wrong."
 			);
+
+			// now generate the dev stakers, after all else is setup
+			if let Some((validators, nominators)) = self.dev_stakers {
+				crate::log!(
+					debug,
+					"generating dev stakers: validators: {}, nominators: {}",
+					validators,
+					nominators
+				);
+				let base_derivation = "//staker//{}";
+
+				// it is okay for the randomness to be the same on every call. If we want different,
+				// we can make `base_derivation` configurable.
+				let mut rng =
+					ChaChaRng::from_seed(base_derivation.using_encoded(sp_core::blake2_256));
+
+				let validators = (0..validators)
+					.map(|index| {
+						let derivation =
+							base_derivation.replace("{}", &format!("validator{}", index));
+						let who = Self::generate_endowed_bonded_account(
+							&derivation,
+							&mut rng,
+							self.min_validator_bond,
+						);
+						assert_ok!(<Pallet<T>>::validate(
+							T::RuntimeOrigin::from(Some(who.clone()).into()),
+							Default::default(),
+						));
+						who
+					})
+					.collect::<Vec<_>>();
+
+				(0..nominators).for_each(|index| {
+					let derivation = base_derivation.replace("{}", &format!("nominator{}", index));
+					let who = Self::generate_endowed_bonded_account(
+						&derivation,
+						&mut rng,
+						self.min_validator_bond,
+					);
+
+					let random_nominations = validators
+						.choose_multiple(&mut rng, MaxNominationsOf::<T>::get() as usize)
+						.map(|v| v.clone())
+						.collect::<Vec<_>>();
+
+					assert_ok!(<Pallet<T>>::nominate(
+						T::RuntimeOrigin::from(Some(who.clone()).into()),
+						random_nominations.iter().map(|l| T::Lookup::unlookup(l.clone())).collect(),
+					));
+				})
+			}
 		}
 	}
 
@@ -834,7 +920,11 @@ pub mod pallet {
 	pub enum Event<T: Config> {
 		/// The era payout has been set; the first balance is the validator-payout; the second is
 		/// the remainder from the maximum amount of reward.
-		EraPaid { era_index: EraIndex, validator_payout: BalanceOf<T>, remainder: BalanceOf<T> },
+		EraPaid {
+			era_index: EraIndex,
+			validator_payout: BalanceOf<T>,
+			remainder: BalanceOf<T>,
+		},
 		/// The nominator has been rewarded by this amount to this destination.
 		Rewarded {
 			stash: T::AccountId,
@@ -842,31 +932,54 @@ pub mod pallet {
 			amount: BalanceOf<T>,
 		},
 		/// A staker (validator or nominator) has been slashed by the given amount.
-		Slashed { staker: T::AccountId, amount: BalanceOf<T> },
+		Slashed {
+			staker: T::AccountId,
+			amount: BalanceOf<T>,
+		},
 		/// A slash for the given validator, for the given percentage of their stake, at the given
 		/// era as been reported.
-		SlashReported { validator: T::AccountId, fraction: Perbill, slash_era: EraIndex },
+		SlashReported {
+			validator: T::AccountId,
+			fraction: Perbill,
+			slash_era: EraIndex,
+		},
 		/// An old slashing report from a prior era was discarded because it could
 		/// not be processed.
-		OldSlashingReportDiscarded { session_index: SessionIndex },
+		OldSlashingReportDiscarded {
+			session_index: SessionIndex,
+		},
 		/// A new set of stakers was elected.
 		StakersElected,
 		/// An account has bonded this amount. \[stash, amount\]
 		///
 		/// NOTE: This event is only emitted when funds are bonded via a dispatchable. Notably,
 		/// it will not be emitted for staking rewards when they are added to stake.
-		Bonded { stash: T::AccountId, amount: BalanceOf<T> },
+		Bonded {
+			stash: T::AccountId,
+			amount: BalanceOf<T>,
+		},
 		/// An account has unbonded this amount.
-		Unbonded { stash: T::AccountId, amount: BalanceOf<T> },
+		Unbonded {
+			stash: T::AccountId,
+			amount: BalanceOf<T>,
+		},
 		/// An account has called `withdraw_unbonded` and removed unbonding chunks worth `Balance`
 		/// from the unlocking queue.
-		Withdrawn { stash: T::AccountId, amount: BalanceOf<T> },
+		Withdrawn {
+			stash: T::AccountId,
+			amount: BalanceOf<T>,
+		},
 		/// A nominator has been kicked from a validator.
-		Kicked { nominator: T::AccountId, stash: T::AccountId },
+		Kicked {
+			nominator: T::AccountId,
+			stash: T::AccountId,
+		},
 		/// The election failed. No new era is planned.
 		StakingElectionFailed,
 		/// An account has stopped participating as either a validator or nominator.
-		Chilled { stash: T::AccountId },
+		Chilled {
+			stash: T::AccountId,
+		},
 		/// A Page of stakers rewards are getting paid. `next` is `None` if all pages are claimed.
 		PayoutStarted {
 			era_index: EraIndex,
@@ -875,22 +988,52 @@ pub mod pallet {
 			next: Option<Page>,
 		},
 		/// A validator has set their preferences.
-		ValidatorPrefsSet { stash: T::AccountId, prefs: ValidatorPrefs },
+		ValidatorPrefsSet {
+			stash: T::AccountId,
+			prefs: ValidatorPrefs,
+		},
 		/// Voters size limit reached.
-		SnapshotVotersSizeExceeded { size: u32 },
+		SnapshotVotersSizeExceeded {
+			size: u32,
+		},
 		/// Targets size limit reached.
-		SnapshotTargetsSizeExceeded { size: u32 },
-		/// A new force era mode was set.
-		ForceEra { mode: Forcing },
+		SnapshotTargetsSizeExceeded {
+			size: u32,
+		},
+		ForceEra {
+			mode: Forcing,
+		},
 		/// Report of a controller batch deprecation.
-		ControllerBatchDeprecated { failures: u32 },
+		ControllerBatchDeprecated {
+			failures: u32,
+		},
 		/// Validator has been disabled.
-		ValidatorDisabled { stash: T::AccountId },
+		ValidatorDisabled {
+			stash: T::AccountId,
+		},
 		/// Validator has been re-enabled.
-		ValidatorReenabled { stash: T::AccountId },
+		ValidatorReenabled {
+			stash: T::AccountId,
+		},
 		/// Staking balance migrated from locks to holds, with any balance that could not be held
 		/// is force withdrawn.
-		CurrencyMigrated { stash: T::AccountId, force_withdraw: BalanceOf<T> },
+		CurrencyMigrated {
+			stash: T::AccountId,
+			force_withdraw: BalanceOf<T>,
+		},
+		/// A page from a multi-page election was fetched. A number of these are followed by
+		/// `StakersElected`.
+		///
+		/// `Ok(count)` indicates the given number of stashes were added.
+		/// `Err(index)` indicates that the stashes after index were dropped.
+		/// `Err(0)` indicates that an error happened but no stashes were dropped nor added.
+		///
+		/// The error indicates that a number of validators were dropped due to excess size, but
+		/// the overall election will continue.
+		PagedElectionProceeded {
+			page: PageIndex,
+			result: Result<u32, u32>,
+		},
 	}
 
 	#[pallet::error]
@@ -970,9 +1113,38 @@ pub mod pallet {
 
 	#[pallet::hooks]
 	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
-		fn on_initialize(_now: BlockNumberFor<T>) -> Weight {
-			// just return the weight of the on_finalize.
-			T::DbWeight::get().reads(1)
+		/// Start fetching the election pages `Pages` blocks before the election prediction, so
+		/// that the `ElectableStashes` has been populated with all validators from all pages at
+		/// the time of the election.
+		fn on_initialize(now: BlockNumberFor<T>) -> Weight {
+			let pages = Self::election_pages();
+
+			// election ongoing, fetch the next page.
+			let inner_weight = if let Some(next_page) = NextElectionPage::<T>::get() {
+				let next_next_page = next_page.checked_sub(1);
+				NextElectionPage::<T>::set(next_next_page);
+				Self::do_elect_paged(next_page)
+			} else {
+				// election isn't ongoing yet, check if it should start.
+				let next_election = <Self as ElectionDataProvider>::next_election_prediction(now);
+
+				if now == (next_election.saturating_sub(pages.into())) {
+					crate::log!(
+						debug,
+						"elect(): start fetching solution pages. expected pages: {:?}",
+						pages
+					);
+
+					let current_page = pages.saturating_sub(1);
+					let next_page = current_page.checked_sub(1);
+					NextElectionPage::<T>::set(next_page);
+					Self::do_elect_paged(current_page)
+				} else {
+					Weight::default()
+				}
+			};
+
+			T::WeightInfo::on_initialize_noop().saturating_add(inner_weight)
 		}
 
 		fn on_finalize(_n: BlockNumberFor<T>) {
@@ -998,18 +1170,12 @@ pub mod pallet {
 			// and that MaxNominations is always greater than 1, since we count on this.
 			assert!(!MaxNominationsOf::<T>::get().is_zero());
 
-			// ensure election results are always bounded with the same value
-			assert!(
-				<T::ElectionProvider as ElectionProviderBase>::MaxWinners::get() ==
-					<T::GenesisElectionProvider as ElectionProviderBase>::MaxWinners::get()
-			);
-
 			assert!(
 				T::SlashDeferDuration::get() < T::BondingDuration::get() || T::BondingDuration::get() == 0,
 				"As per documentation, slash defer duration ({}) should be less than bonding duration ({}).",
 				T::SlashDeferDuration::get(),
 				T::BondingDuration::get(),
-			)
+			);
 		}
 
 		#[cfg(feature = "try-runtime")]
@@ -1030,7 +1196,7 @@ pub mod pallet {
 		}
 
 		/// Get the validators that may never be slashed or forcibly kicked out.
-		pub fn invulnerables() -> Vec<T::AccountId> {
+		pub fn invulnerables() -> BoundedVec<T::AccountId, T::MaxInvulnerables> {
 			Invulnerables::<T>::get()
 		}
 
@@ -1073,18 +1239,6 @@ pub mod pallet {
 			ErasStartSessionIndex::<T>::get(era_index)
 		}
 
-		/// Get the clipped exposure of a given validator at an era.
-		pub fn eras_stakers_clipped<EncodeLikeEraIndex, EncodeLikeAccountId>(
-			era_index: EncodeLikeEraIndex,
-			account_id: EncodeLikeAccountId,
-		) -> Exposure<T::AccountId, BalanceOf<T>>
-		where
-			EncodeLikeEraIndex: codec::EncodeLike<EraIndex>,
-			EncodeLikeAccountId: codec::EncodeLike<T::AccountId>,
-		{
-			ErasStakersClipped::<T>::get(era_index, account_id)
-		}
-
 		/// Get the paged history of claimed rewards by era for given validator.
 		pub fn claimed_rewards<EncodeLikeEraIndex, EncodeLikeAccountId>(
 			era_index: EncodeLikeEraIndex,
@@ -1604,18 +1758,15 @@ pub mod pallet {
 			#[pallet::compact] new: u32,
 		) -> DispatchResult {
 			ensure_root(origin)?;
-			// ensure new validator count does not exceed maximum winners
-			// support by election provider.
-			ensure!(
-				new <= <T::ElectionProvider as ElectionProviderBase>::MaxWinners::get(),
-				Error::<T>::TooManyValidators
-			);
+
+			ensure!(new <= T::MaxValidatorSet::get(), Error::<T>::TooManyValidators);
+
 			ValidatorCount::<T>::put(new);
 			Ok(())
 		}
 
 		/// Increments the ideal number of validators up to maximum of
-		/// `ElectionProviderBase::MaxWinners`.
+		/// `T::MaxValidatorSet`.
 		///
 		/// The dispatch origin must be Root.
 		///
@@ -1630,17 +1781,15 @@ pub mod pallet {
 			ensure_root(origin)?;
 			let old = ValidatorCount::<T>::get();
 			let new = old.checked_add(additional).ok_or(ArithmeticError::Overflow)?;
-			ensure!(
-				new <= <T::ElectionProvider as ElectionProviderBase>::MaxWinners::get(),
-				Error::<T>::TooManyValidators
-			);
+
+			ensure!(new <= T::MaxValidatorSet::get(), Error::<T>::TooManyValidators);
 
 			ValidatorCount::<T>::put(new);
 			Ok(())
 		}
 
 		/// Scale up the ideal number of validators by a factor up to maximum of
-		/// `ElectionProviderBase::MaxWinners`.
+		/// `T::MaxValidatorSet`.
 		///
 		/// The dispatch origin must be Root.
 		///
@@ -1653,10 +1802,7 @@ pub mod pallet {
 			let old = ValidatorCount::<T>::get();
 			let new = old.checked_add(factor.mul_floor(old)).ok_or(ArithmeticError::Overflow)?;
 
-			ensure!(
-				new <= <T::ElectionProvider as ElectionProviderBase>::MaxWinners::get(),
-				Error::<T>::TooManyValidators
-			);
+			ensure!(new <= T::MaxValidatorSet::get(), Error::<T>::TooManyValidators);
 
 			ValidatorCount::<T>::put(new);
 			Ok(())
@@ -1715,6 +1861,8 @@ pub mod pallet {
 			invulnerables: Vec<T::AccountId>,
 		) -> DispatchResult {
 			ensure_root(origin)?;
+			let invulnerables =
+				BoundedVec::try_from(invulnerables).map_err(|_| Error::<T>::BoundNotMet)?;
 			<Invulnerables<T>>::put(invulnerables);
 			Ok(())
 		}
@@ -1810,6 +1958,7 @@ pub mod pallet {
 			era: EraIndex,
 		) -> DispatchResultWithPostInfo {
 			ensure_signed(origin)?;
+
 			Self::do_payout_stakers(validator_stash, era)
 		}
 
diff --git a/substrate/frame/staking/src/slashing.rs b/substrate/frame/staking/src/slashing.rs
index ae76b0707dc..98a6424fe7a 100644
--- a/substrate/frame/staking/src/slashing.rs
+++ b/substrate/frame/staking/src/slashing.rs
@@ -340,13 +340,15 @@ fn add_offending_validator<T: Config>(params: &SlashParams<T>) {
 				},
 				Err(index) => {
 					// Offender is not disabled, add to `DisabledValidators` and disable it
-					disabled.insert(index, (offender_idx, new_severity));
-					// Propagate disablement to session level
-					T::SessionInterface::disable_validator(offender_idx);
-					// Emit event that a validator got disabled
-					<Pallet<T>>::deposit_event(super::Event::<T>::ValidatorDisabled {
-						stash: params.stash.clone(),
-					});
+					if disabled.try_insert(index, (offender_idx, new_severity)).defensive().is_ok()
+					{
+						// Propagate disablement to session level
+						T::SessionInterface::disable_validator(offender_idx);
+						// Emit event that a validator got disabled
+						<Pallet<T>>::deposit_event(super::Event::<T>::ValidatorDisabled {
+							stash: params.stash.clone(),
+						});
+					}
 				},
 			}
 		}
diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs
index e8740f15af2..8fe3c8f1775 100644
--- a/substrate/frame/staking/src/tests.rs
+++ b/substrate/frame/staking/src/tests.rs
@@ -32,6 +32,7 @@ use frame_support::{
 		fungible::Inspect, Currency, Get, InspectLockableCurrency, LockableCurrency,
 		ReservableCurrency, WithdrawReasons,
 	},
+	BoundedVec,
 };
 
 use mock::*;
@@ -1371,6 +1372,7 @@ fn bond_extra_and_withdraw_unbonded_works() {
 				legacy_claimed_rewards: bounded_vec![],
 			}
 		);
+
 		assert_eq!(
 			Staking::eras_stakers(active_era(), &11),
 			Exposure { total: 1000, own: 1000, others: vec![] }
@@ -1921,7 +1923,11 @@ fn reward_to_stake_works() {
 			let _ = asset::set_stakeable_balance::<Test>(&20, 1000);
 
 			// Bypass logic and change current exposure
-			EraInfo::<Test>::set_exposure(0, &21, Exposure { total: 69, own: 69, others: vec![] });
+			EraInfo::<Test>::upsert_exposure(
+				0,
+				&21,
+				Exposure { total: 69, own: 69, others: vec![] },
+			);
 			<Ledger<Test>>::insert(
 				&20,
 				StakingLedgerInspect {
@@ -2272,14 +2278,14 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider() {
 
 			// winners should be 21 and 31. Otherwise this election is taking duplicates into
 			// account.
-			let supports = <Test as Config>::ElectionProvider::elect().unwrap();
-			assert_eq!(
-				supports,
-				vec![
-					(21, Support { total: 1800, voters: vec![(21, 1000), (1, 400), (3, 400)] }),
-					(31, Support { total: 2200, voters: vec![(31, 1000), (1, 600), (3, 600)] })
-				],
-			);
+			let supports = <Test as Config>::ElectionProvider::elect(SINGLE_PAGE).unwrap();
+
+			let expected_supports = vec![
+				(21, Support { total: 1800, voters: vec![(21, 1000), (1, 400), (3, 400)] }),
+				(31, Support { total: 2200, voters: vec![(31, 1000), (1, 600), (3, 600)] }),
+			];
+
+			assert_eq!(supports, to_bounded_supports(expected_supports));
 		});
 }
 
@@ -2324,14 +2330,13 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider_elected() {
 			assert_ok!(Staking::nominate(RuntimeOrigin::signed(3), vec![21]));
 
 			// winners should be 21 and 11.
-			let supports = <Test as Config>::ElectionProvider::elect().unwrap();
-			assert_eq!(
-				supports,
-				vec![
-					(11, Support { total: 1500, voters: vec![(11, 1000), (1, 500)] }),
-					(21, Support { total: 2500, voters: vec![(21, 1000), (1, 500), (3, 1000)] })
-				],
-			);
+			let supports = <Test as Config>::ElectionProvider::elect(SINGLE_PAGE).unwrap();
+			let expected_supports = vec![
+				(11, Support { total: 1500, voters: vec![(11, 1000), (1, 500)] }),
+				(21, Support { total: 2500, voters: vec![(21, 1000), (1, 500), (3, 1000)] }),
+			];
+
+			assert_eq!(supports, to_bounded_supports(expected_supports));
 		});
 }
 
@@ -2374,7 +2379,7 @@ fn phragmen_should_not_overflow() {
 
 #[test]
 fn reward_validator_slashing_validator_does_not_overflow() {
-	ExtBuilder::default().build_and_execute(|| {
+	ExtBuilder::default().nominate(false).build_and_execute(|| {
 		let stake = u64::MAX as Balance * 2;
 		let reward_slash = u64::MAX as Balance * 2;
 
@@ -2384,7 +2389,6 @@ fn reward_validator_slashing_validator_does_not_overflow() {
 		// Set staker
 		let _ = asset::set_stakeable_balance::<Test>(&11, stake);
 
-		let exposure = Exposure::<AccountId, Balance> { total: stake, own: stake, others: vec![] };
 		let reward = EraRewardPoints::<AccountId> {
 			total: 1,
 			individual: vec![(11, 1)].into_iter().collect(),
@@ -2392,7 +2396,19 @@ fn reward_validator_slashing_validator_does_not_overflow() {
 
 		// Check reward
 		ErasRewardPoints::<Test>::insert(0, reward);
-		EraInfo::<Test>::set_exposure(0, &11, exposure);
+
+		// force exposure metadata to account for the overflowing `stake`.
+		ErasStakersOverview::<Test>::insert(
+			current_era(),
+			11,
+			PagedExposureMetadata { total: stake, own: stake, nominator_count: 0, page_count: 0 },
+		);
+
+		// we want to slash only self-stake, confirm that no others exposed.
+		let full_exposure_after = EraInfo::<Test>::get_full_exposure(current_era(), &11);
+		assert_eq!(full_exposure_after.total, stake);
+		assert_eq!(full_exposure_after.others, vec![]);
+
 		ErasValidatorReward::<Test>::insert(0, stake);
 		assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 0, 0));
 		assert_eq!(asset::stakeable_balance::<Test>(&11), stake * 2);
@@ -2414,13 +2430,19 @@ fn reward_validator_slashing_validator_does_not_overflow() {
 
 		// only slashes out of bonded stake are applied. without this line, it is 0.
 		Staking::bond(RuntimeOrigin::signed(2), stake - 1, RewardDestination::Staked).unwrap();
-		// Override exposure of 11
-		EraInfo::<Test>::set_exposure(
-			0,
-			&11,
-			Exposure {
-				total: stake,
-				own: 1,
+
+		// Override metadata and exposures of 11 so that it exposes minmal self stake and `stake` -
+		// 1 from nominator 2.
+		ErasStakersOverview::<Test>::insert(
+			current_era(),
+			11,
+			PagedExposureMetadata { total: stake, own: 1, nominator_count: 1, page_count: 1 },
+		);
+
+		ErasStakersPaged::<Test>::insert(
+			(current_era(), &11, 0),
+			ExposurePage {
+				page_total: stake - 1,
 				others: vec![IndividualExposure { who: 2, value: stake - 1 }],
 			},
 		);
@@ -3128,6 +3150,7 @@ fn deferred_slashes_are_deferred() {
 			staking_events_since_last_call().as_slice(),
 			&[
 				Event::SlashReported { validator: 11, slash_era: 1, .. },
+				Event::PagedElectionProceeded { page: 0, result: Ok(2) },
 				Event::StakersElected,
 				..,
 				Event::Slashed { staker: 11, amount: 100 },
@@ -3464,6 +3487,7 @@ fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_valid
 			assert_eq!(
 				staking_events_since_last_call(),
 				vec![
+					Event::PagedElectionProceeded { page: 0, result: Ok(7) },
 					Event::StakersElected,
 					Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 },
 					Event::SlashReported {
@@ -3537,6 +3561,7 @@ fn non_slashable_offence_disables_validator() {
 			assert_eq!(
 				staking_events_since_last_call(),
 				vec![
+					Event::PagedElectionProceeded { page: 0, result: Ok(7) },
 					Event::StakersElected,
 					Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 },
 					Event::SlashReported {
@@ -3617,6 +3642,7 @@ fn slashing_independent_of_disabling_validator() {
 			assert_eq!(
 				staking_events_since_last_call(),
 				vec![
+					Event::PagedElectionProceeded { page: 0, result: Ok(5) },
 					Event::StakersElected,
 					Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 },
 					Event::SlashReported {
@@ -3645,7 +3671,7 @@ fn slashing_independent_of_disabling_validator() {
 }
 
 #[test]
-fn offence_threshold_doesnt_trigger_new_era() {
+fn offence_threshold_doesnt_plan_new_era() {
 	ExtBuilder::default()
 		.validator_count(4)
 		.set_status(41, StakerStatus::Validator)
@@ -3883,12 +3909,17 @@ fn six_session_delay() {
 
 		// pallet-session is delaying session by one, thus the next session to plan is +2.
 		assert_eq!(<Staking as SessionManager<_>>::new_session(init_session + 2), None);
+
+		// note a new election happens independently of the call to `new_session`.
+		Staking::do_elect_paged(0);
 		assert_eq!(
 			<Staking as SessionManager<_>>::new_session(init_session + 3),
 			Some(val_set.clone())
 		);
 		assert_eq!(<Staking as SessionManager<_>>::new_session(init_session + 4), None);
 		assert_eq!(<Staking as SessionManager<_>>::new_session(init_session + 5), None);
+
+		Staking::do_elect_paged(0);
 		assert_eq!(
 			<Staking as SessionManager<_>>::new_session(init_session + 6),
 			Some(val_set.clone())
@@ -4129,17 +4160,8 @@ fn test_multi_page_payout_stakers_by_page() {
 		);
 
 		// verify rewards are tracked to prevent double claims
-		let ledger = Staking::ledger(11.into());
 		for page in 0..EraInfo::<Test>::get_page_count(1, &11) {
-			assert_eq!(
-				EraInfo::<Test>::is_rewards_claimed_with_legacy_fallback(
-					1,
-					ledger.as_ref().unwrap(),
-					&11,
-					page
-				),
-				true
-			);
+			assert_eq!(EraInfo::<Test>::is_rewards_claimed(1, &11, page), true);
 		}
 
 		for i in 3..16 {
@@ -4161,15 +4183,7 @@ fn test_multi_page_payout_stakers_by_page() {
 
 			// verify we track rewards for each era and page
 			for page in 0..EraInfo::<Test>::get_page_count(i - 1, &11) {
-				assert_eq!(
-					EraInfo::<Test>::is_rewards_claimed_with_legacy_fallback(
-						i - 1,
-						Staking::ledger(11.into()).as_ref().unwrap(),
-						&11,
-						page
-					),
-					true
-				);
+				assert_eq!(EraInfo::<Test>::is_rewards_claimed(i - 1, &11, page), true);
 			}
 		}
 
@@ -4328,7 +4342,6 @@ fn test_multi_page_payout_stakers_backward_compatible() {
 		}
 
 		// verify we no longer track rewards in `legacy_claimed_rewards` vec
-		let ledger = Staking::ledger(11.into());
 		assert_eq!(
 			Staking::ledger(11.into()).unwrap(),
 			StakingLedgerInspect {
@@ -4342,15 +4355,7 @@ fn test_multi_page_payout_stakers_backward_compatible() {
 
 		// verify rewards are tracked to prevent double claims
 		for page in 0..EraInfo::<Test>::get_page_count(1, &11) {
-			assert_eq!(
-				EraInfo::<Test>::is_rewards_claimed_with_legacy_fallback(
-					1,
-					ledger.as_ref().unwrap(),
-					&11,
-					page
-				),
-				true
-			);
+			assert_eq!(EraInfo::<Test>::is_rewards_claimed(1, &11, page), true);
 		}
 
 		for i in 3..16 {
@@ -4372,15 +4377,7 @@ fn test_multi_page_payout_stakers_backward_compatible() {
 
 			// verify we track rewards for each era and page
 			for page in 0..EraInfo::<Test>::get_page_count(i - 1, &11) {
-				assert_eq!(
-					EraInfo::<Test>::is_rewards_claimed_with_legacy_fallback(
-						i - 1,
-						Staking::ledger(11.into()).as_ref().unwrap(),
-						&11,
-						page
-					),
-					true
-				);
+				assert_eq!(EraInfo::<Test>::is_rewards_claimed(i - 1, &11, page), true);
 			}
 		}
 
@@ -4492,6 +4489,7 @@ fn test_page_count_and_size() {
 		mock::start_active_era(1);
 
 		// Since max exposure page size is 64, 2 pages of nominators are created.
+		assert_eq!(MaxExposurePageSize::get(), 64);
 		assert_eq!(EraInfo::<Test>::get_page_count(1, &11), 2);
 
 		// first page has 64 nominators
@@ -5206,41 +5204,6 @@ mod election_data_provider {
 	use super::*;
 	use frame_election_provider_support::ElectionDataProvider;
 
-	#[test]
-	fn targets_2sec_block() {
-		let mut validators = 1000;
-		while <Test as Config>::WeightInfo::get_npos_targets(validators).all_lt(Weight::from_parts(
-			2u64 * frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND,
-			u64::MAX,
-		)) {
-			validators += 1;
-		}
-
-		println!("Can create a snapshot of {} validators in 2sec block", validators);
-	}
-
-	#[test]
-	fn voters_2sec_block() {
-		// we assume a network only wants up to 1000 validators in most cases, thus having 2000
-		// candidates is as high as it gets.
-		let validators = 2000;
-		let mut nominators = 1000;
-
-		while <Test as Config>::WeightInfo::get_npos_voters(validators, nominators).all_lt(
-			Weight::from_parts(
-				2u64 * frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND,
-				u64::MAX,
-			),
-		) {
-			nominators += 1;
-		}
-
-		println!(
-			"Can create a snapshot of {} nominators [{} validators, each 1 slashing] in 2sec block",
-			nominators, validators
-		);
-	}
-
 	#[test]
 	fn set_minimum_active_stake_is_correct() {
 		ExtBuilder::default()
@@ -5251,14 +5214,15 @@ mod election_data_provider {
 			.build_and_execute(|| {
 				// default bounds are unbounded.
 				assert_ok!(<Staking as ElectionDataProvider>::electing_voters(
-					DataProviderBounds::default()
+					DataProviderBounds::default(),
+					0
 				));
 				assert_eq!(MinimumActiveStake::<Test>::get(), 10);
 
 				// remove staker with lower bond by limiting the number of voters and check
 				// `MinimumActiveStake` again after electing voters.
 				let bounds = ElectionBoundsBuilder::default().voters_count(5.into()).build();
-				assert_ok!(<Staking as ElectionDataProvider>::electing_voters(bounds.voters));
+				assert_ok!(<Staking as ElectionDataProvider>::electing_voters(bounds.voters, 0));
 				assert_eq!(MinimumActiveStake::<Test>::get(), 50);
 			});
 	}
@@ -5269,7 +5233,8 @@ mod election_data_provider {
 		ExtBuilder::default().has_stakers(false).build_and_execute(|| {
 			// default bounds are unbounded.
 			assert_ok!(<Staking as ElectionDataProvider>::electing_voters(
-				DataProviderBounds::default()
+				DataProviderBounds::default(),
+				0
 			));
 			assert_eq!(<Test as Config>::VoterList::count(), 0);
 			assert_eq!(MinimumActiveStake::<Test>::get(), 0);
@@ -5285,9 +5250,11 @@ mod election_data_provider {
 			assert_ok!(Staking::nominate(RuntimeOrigin::signed(4), vec![1]));
 			assert_eq!(<Test as Config>::VoterList::count(), 5);
 
-			let voters_before =
-				<Staking as ElectionDataProvider>::electing_voters(DataProviderBounds::default())
-					.unwrap();
+			let voters_before = <Staking as ElectionDataProvider>::electing_voters(
+				DataProviderBounds::default(),
+				0,
+			)
+			.unwrap();
 			assert_eq!(MinimumActiveStake::<Test>::get(), 5);
 
 			// update minimum nominator bond.
@@ -5297,9 +5264,11 @@ mod election_data_provider {
 			// lower than `MinNominatorBond`.
 			assert_eq!(<Test as Config>::VoterList::count(), 5);
 
-			let voters =
-				<Staking as ElectionDataProvider>::electing_voters(DataProviderBounds::default())
-					.unwrap();
+			let voters = <Staking as ElectionDataProvider>::electing_voters(
+				DataProviderBounds::default(),
+				0,
+			)
+			.unwrap();
 			assert_eq!(voters_before, voters);
 
 			// minimum active stake is lower than `MinNominatorBond`.
@@ -5317,6 +5286,7 @@ mod election_data_provider {
 				assert_eq!(Staking::weight_of(&101), 500);
 				let voters = <Staking as ElectionDataProvider>::electing_voters(
 					DataProviderBounds::default(),
+					0,
 				)
 				.unwrap();
 				assert_eq!(voters.len(), 5);
@@ -5332,6 +5302,7 @@ mod election_data_provider {
 
 				let voters = <Staking as ElectionDataProvider>::electing_voters(
 					DataProviderBounds::default(),
+					0,
 				)
 				.unwrap();
 				// number of returned voters decreases since ledger entry of stash 101 is now
@@ -5353,7 +5324,8 @@ mod election_data_provider {
 		ExtBuilder::default().nominate(false).build_and_execute(|| {
 			// default bounds are unbounded.
 			assert!(<Validators<Test>>::iter().map(|(x, _)| x).all(|v| Staking::electing_voters(
-				DataProviderBounds::default()
+				DataProviderBounds::default(),
+				0
 			)
 			.unwrap()
 			.into_iter()
@@ -5407,12 +5379,15 @@ mod election_data_provider {
 				// 11 is taken;
 				// we finish since the 2x limit is reached.
 				assert_eq!(
-					Staking::electing_voters(bounds_builder.voters_count(2.into()).build().voters)
-						.unwrap()
-						.iter()
-						.map(|(stash, _, _)| stash)
-						.copied()
-						.collect::<Vec<_>>(),
+					Staking::electing_voters(
+						bounds_builder.voters_count(2.into()).build().voters,
+						0
+					)
+					.unwrap()
+					.iter()
+					.map(|(stash, _, _)| stash)
+					.copied()
+					.collect::<Vec<_>>(),
 					vec![11],
 				);
 			});
@@ -5430,32 +5405,42 @@ mod election_data_provider {
 
 				// if voter count limit is less..
 				assert_eq!(
-					Staking::electing_voters(bounds_builder.voters_count(1.into()).build().voters)
-						.unwrap()
-						.len(),
+					Staking::electing_voters(
+						bounds_builder.voters_count(1.into()).build().voters,
+						0
+					)
+					.unwrap()
+					.len(),
 					1
 				);
 
 				// if voter count limit is equal..
 				assert_eq!(
-					Staking::electing_voters(bounds_builder.voters_count(5.into()).build().voters)
-						.unwrap()
-						.len(),
+					Staking::electing_voters(
+						bounds_builder.voters_count(5.into()).build().voters,
+						0
+					)
+					.unwrap()
+					.len(),
 					5
 				);
 
 				// if voter count limit is more.
 				assert_eq!(
-					Staking::electing_voters(bounds_builder.voters_count(55.into()).build().voters)
-						.unwrap()
-						.len(),
+					Staking::electing_voters(
+						bounds_builder.voters_count(55.into()).build().voters,
+						0
+					)
+					.unwrap()
+					.len(),
 					5
 				);
 
 				// if target count limit is more..
 				assert_eq!(
 					Staking::electable_targets(
-						bounds_builder.targets_count(6.into()).build().targets
+						bounds_builder.targets_count(6.into()).build().targets,
+						0,
 					)
 					.unwrap()
 					.len(),
@@ -5465,7 +5450,8 @@ mod election_data_provider {
 				// if target count limit is equal..
 				assert_eq!(
 					Staking::electable_targets(
-						bounds_builder.targets_count(4.into()).build().targets
+						bounds_builder.targets_count(4.into()).build().targets,
+						0,
 					)
 					.unwrap()
 					.len(),
@@ -5475,10 +5461,12 @@ mod election_data_provider {
 				// if target limit count is less, then we return an error.
 				assert_eq!(
 					Staking::electable_targets(
-						bounds_builder.targets_count(1.into()).build().targets
+						bounds_builder.targets_count(1.into()).build().targets,
+						0
 					)
-					.unwrap_err(),
-					"Target snapshot too big"
+					.unwrap()
+					.len(),
+					1,
 				);
 			});
 	}
@@ -5488,25 +5476,25 @@ mod election_data_provider {
 		ExtBuilder::default().build_and_execute(|| {
 			// voters: set size bounds that allows only for 1 voter.
 			let bounds = ElectionBoundsBuilder::default().voters_size(26.into()).build();
-			let elected = Staking::electing_voters(bounds.voters).unwrap();
+			let elected = Staking::electing_voters(bounds.voters, 0).unwrap();
 			assert!(elected.encoded_size() == 26 as usize);
 			let prev_len = elected.len();
 
 			// larger size bounds means more quota for voters.
 			let bounds = ElectionBoundsBuilder::default().voters_size(100.into()).build();
-			let elected = Staking::electing_voters(bounds.voters).unwrap();
+			let elected = Staking::electing_voters(bounds.voters, 0).unwrap();
 			assert!(elected.encoded_size() <= 100 as usize);
 			assert!(elected.len() > 1 && elected.len() > prev_len);
 
 			// targets: set size bounds that allows for only one target to fit in the snapshot.
 			let bounds = ElectionBoundsBuilder::default().targets_size(10.into()).build();
-			let elected = Staking::electable_targets(bounds.targets).unwrap();
+			let elected = Staking::electable_targets(bounds.targets, 0).unwrap();
 			assert!(elected.encoded_size() == 9 as usize);
 			let prev_len = elected.len();
 
 			// larger size bounds means more space for targets.
 			let bounds = ElectionBoundsBuilder::default().targets_size(100.into()).build();
-			let elected = Staking::electable_targets(bounds.targets).unwrap();
+			let elected = Staking::electable_targets(bounds.targets, 0).unwrap();
 			assert!(elected.encoded_size() <= 100 as usize);
 			assert!(elected.len() > 1 && elected.len() > prev_len);
 		});
@@ -5550,7 +5538,7 @@ mod election_data_provider {
 				// even through 61 has nomination quota of 2 at the time of the election, all the
 				// nominations (5) will be used.
 				assert_eq!(
-					Staking::electing_voters(DataProviderBounds::default())
+					Staking::electing_voters(DataProviderBounds::default(), 0)
 						.unwrap()
 						.iter()
 						.map(|(stash, _, targets)| (*stash, targets.len()))
@@ -5574,7 +5562,7 @@ mod election_data_provider {
 				// nominations of controller 70 won't be added due to voter size limit exceeded.
 				let bounds = ElectionBoundsBuilder::default().voters_size(100.into()).build();
 				assert_eq!(
-					Staking::electing_voters(bounds.voters)
+					Staking::electing_voters(bounds.voters, 0)
 						.unwrap()
 						.iter()
 						.map(|(stash, _, targets)| (*stash, targets.len()))
@@ -5591,7 +5579,7 @@ mod election_data_provider {
 				// include the electing voters of 70.
 				let bounds = ElectionBoundsBuilder::default().voters_size(1_000.into()).build();
 				assert_eq!(
-					Staking::electing_voters(bounds.voters)
+					Staking::electing_voters(bounds.voters, 0)
 						.unwrap()
 						.iter()
 						.map(|(stash, _, targets)| (*stash, targets.len()))
@@ -5602,10 +5590,10 @@ mod election_data_provider {
 	}
 
 	#[test]
-	fn estimate_next_election_works() {
+	fn estimate_next_election_single_page_works() {
 		ExtBuilder::default().session_per_era(5).period(5).build_and_execute(|| {
 			// first session is always length 0.
-			for b in 1..20 {
+			for b in 1..19 {
 				run_to_block(b);
 				assert_eq!(Staking::next_election_prediction(System::block_number()), 20);
 			}
@@ -5613,10 +5601,9 @@ mod election_data_provider {
 			// election
 			run_to_block(20);
 			assert_eq!(Staking::next_election_prediction(System::block_number()), 45);
-			assert_eq!(staking_events().len(), 1);
 			assert_eq!(*staking_events().last().unwrap(), Event::StakersElected);
 
-			for b in 21..45 {
+			for b in 21..44 {
 				run_to_block(b);
 				assert_eq!(Staking::next_election_prediction(System::block_number()), 45);
 			}
@@ -5624,7 +5611,6 @@ mod election_data_provider {
 			// election
 			run_to_block(45);
 			assert_eq!(Staking::next_election_prediction(System::block_number()), 70);
-			assert_eq!(staking_events().len(), 3);
 			assert_eq!(*staking_events().last().unwrap(), Event::StakersElected);
 
 			Staking::force_no_eras(RuntimeOrigin::root()).unwrap();
@@ -5647,7 +5633,6 @@ mod election_data_provider {
 			MinimumValidatorCount::<Test>::put(2);
 			run_to_block(55);
 			assert_eq!(Staking::next_election_prediction(System::block_number()), 55 + 25);
-			assert_eq!(staking_events().len(), 10);
 			assert_eq!(
 				*staking_events().last().unwrap(),
 				Event::ForceEra { mode: Forcing::NotForcing }
@@ -6160,7 +6145,7 @@ fn change_of_absolute_max_nominations() {
 			let bounds = DataProviderBounds::default();
 
 			// 3 validators and 3 nominators
-			assert_eq!(Staking::electing_voters(bounds).unwrap().len(), 3 + 3);
+			assert_eq!(Staking::electing_voters(bounds, 0).unwrap().len(), 3 + 3);
 
 			// abrupt change from 16 to 4, everyone should be fine.
 			AbsoluteMaxNominations::set(4);
@@ -6171,7 +6156,7 @@ fn change_of_absolute_max_nominations() {
 					.collect::<Vec<_>>(),
 				vec![(101, 2), (71, 3), (61, 1)]
 			);
-			assert_eq!(Staking::electing_voters(bounds).unwrap().len(), 3 + 3);
+			assert_eq!(Staking::electing_voters(bounds, 0).unwrap().len(), 3 + 3);
 
 			// No one can be chilled on account of non-decodable keys.
 			for k in Nominators::<Test>::iter_keys() {
@@ -6190,7 +6175,7 @@ fn change_of_absolute_max_nominations() {
 					.collect::<Vec<_>>(),
 				vec![(101, 2), (71, 3), (61, 1)]
 			);
-			assert_eq!(Staking::electing_voters(bounds).unwrap().len(), 3 + 3);
+			assert_eq!(Staking::electing_voters(bounds, 0).unwrap().len(), 3 + 3);
 
 			// As before, no one can be chilled on account of non-decodable keys.
 			for k in Nominators::<Test>::iter_keys() {
@@ -6224,7 +6209,7 @@ fn change_of_absolute_max_nominations() {
 			// but its value cannot be decoded and default is returned.
 			assert!(Nominators::<Test>::get(71).is_none());
 
-			assert_eq!(Staking::electing_voters(bounds).unwrap().len(), 3 + 2);
+			assert_eq!(Staking::electing_voters(bounds, 0).unwrap().len(), 3 + 2);
 			assert!(Nominators::<Test>::contains_key(101));
 
 			// abrupt change from 2 to 1, this should cause some nominators to be non-decodable, and
@@ -6248,7 +6233,7 @@ fn change_of_absolute_max_nominations() {
 			assert!(Nominators::<Test>::contains_key(61));
 			assert!(Nominators::<Test>::get(71).is_none());
 			assert!(Nominators::<Test>::get(61).is_some());
-			assert_eq!(Staking::electing_voters(bounds).unwrap().len(), 3 + 1);
+			assert_eq!(Staking::electing_voters(bounds, 0).unwrap().len(), 3 + 1);
 
 			// now one of them can revive themselves by re-nominating to a proper value.
 			assert_ok!(Staking::nominate(RuntimeOrigin::signed(71), vec![1]));
@@ -6291,7 +6276,7 @@ fn nomination_quota_max_changes_decoding() {
 				vec![(70, 3), (101, 2), (50, 4), (30, 4), (60, 1)]
 			);
 			// 4 validators and 4 nominators
-			assert_eq!(Staking::electing_voters(unbonded_election).unwrap().len(), 4 + 4);
+			assert_eq!(Staking::electing_voters(unbonded_election, 0).unwrap().len(), 4 + 4);
 		});
 }
 
@@ -6692,7 +6677,8 @@ fn reducing_max_unlocking_chunks_abrupt() {
 #[test]
 fn cannot_set_unsupported_validator_count() {
 	ExtBuilder::default().build_and_execute(|| {
-		MaxWinners::set(50);
+		MaxValidatorSet::set(50);
+		MaxWinnersPerPage::set(50);
 		// set validator count works
 		assert_ok!(Staking::set_validator_count(RuntimeOrigin::root(), 30));
 		assert_ok!(Staking::set_validator_count(RuntimeOrigin::root(), 50));
@@ -6707,7 +6693,8 @@ fn cannot_set_unsupported_validator_count() {
 #[test]
 fn increase_validator_count_errors() {
 	ExtBuilder::default().build_and_execute(|| {
-		MaxWinners::set(50);
+		MaxValidatorSet::set(50);
+		MaxWinnersPerPage::set(50);
 		assert_ok!(Staking::set_validator_count(RuntimeOrigin::root(), 40));
 
 		// increase works
@@ -6725,7 +6712,8 @@ fn increase_validator_count_errors() {
 #[test]
 fn scale_validator_count_errors() {
 	ExtBuilder::default().build_and_execute(|| {
-		MaxWinners::set(50);
+		MaxValidatorSet::set(50);
+		MaxWinnersPerPage::set(50);
 		assert_ok!(Staking::set_validator_count(RuntimeOrigin::root(), 20));
 
 		// scale value works
@@ -6863,218 +6851,6 @@ fn should_retain_era_info_only_upto_history_depth() {
 	});
 }
 
-#[test]
-fn test_legacy_claimed_rewards_is_checked_at_reward_payout() {
-	ExtBuilder::default().has_stakers(false).build_and_execute(|| {
-		// Create a validator:
-		bond_validator(11, 1000);
-
-		// reward validator for next 2 eras
-		mock::start_active_era(1);
-		Pallet::<Test>::reward_by_ids(vec![(11, 1)]);
-		mock::start_active_era(2);
-		Pallet::<Test>::reward_by_ids(vec![(11, 1)]);
-		mock::start_active_era(3);
-
-		//verify rewards are not claimed
-		assert_eq!(
-			EraInfo::<Test>::is_rewards_claimed_with_legacy_fallback(
-				1,
-				Staking::ledger(11.into()).as_ref().unwrap(),
-				&11,
-				0
-			),
-			false
-		);
-		assert_eq!(
-			EraInfo::<Test>::is_rewards_claimed_with_legacy_fallback(
-				2,
-				Staking::ledger(11.into()).as_ref().unwrap(),
-				&11,
-				0
-			),
-			false
-		);
-
-		// assume reward claim for era 1 was stored in legacy storage
-		Ledger::<Test>::insert(
-			11,
-			StakingLedgerInspect {
-				stash: 11,
-				total: 1000,
-				active: 1000,
-				unlocking: Default::default(),
-				legacy_claimed_rewards: bounded_vec![1],
-			},
-		);
-
-		// verify rewards for era 1 cannot be claimed
-		assert_noop!(
-			Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 1, 0),
-			Error::<Test>::AlreadyClaimed
-				.with_weight(<Test as Config>::WeightInfo::payout_stakers_alive_staked(0)),
-		);
-		assert_eq!(
-			EraInfo::<Test>::is_rewards_claimed_with_legacy_fallback(
-				1,
-				Staking::ledger(11.into()).as_ref().unwrap(),
-				&11,
-				0
-			),
-			true
-		);
-
-		// verify rewards for era 2 can be claimed
-		assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 2, 0));
-		assert_eq!(
-			EraInfo::<Test>::is_rewards_claimed_with_legacy_fallback(
-				2,
-				Staking::ledger(11.into()).as_ref().unwrap(),
-				&11,
-				0
-			),
-			true
-		);
-		// but the new claimed rewards for era 2 is not stored in legacy storage
-		assert_eq!(
-			Ledger::<Test>::get(11).unwrap(),
-			StakingLedgerInspect {
-				stash: 11,
-				total: 1000,
-				active: 1000,
-				unlocking: Default::default(),
-				legacy_claimed_rewards: bounded_vec![1],
-			},
-		);
-		// instead it is kept in `ClaimedRewards`
-		assert_eq!(ClaimedRewards::<Test>::get(2, 11), vec![0]);
-	});
-}
-
-#[test]
-fn test_validator_exposure_is_backward_compatible_with_non_paged_rewards_payout() {
-	ExtBuilder::default().has_stakers(false).build_and_execute(|| {
-		// case 1: exposure exist in clipped.
-		// set page cap to 10
-		MaxExposurePageSize::set(10);
-		bond_validator(11, 1000);
-		let mut expected_individual_exposures: Vec<IndividualExposure<AccountId, Balance>> = vec![];
-		let mut total_exposure: Balance = 0;
-		// 1st exposure page
-		for i in 0..10 {
-			let who = 1000 + i;
-			let value = 1000 + i as Balance;
-			bond_nominator(who, value, vec![11]);
-			expected_individual_exposures.push(IndividualExposure { who, value });
-			total_exposure += value;
-		}
-
-		for i in 10..15 {
-			let who = 1000 + i;
-			let value = 1000 + i as Balance;
-			bond_nominator(who, value, vec![11]);
-			expected_individual_exposures.push(IndividualExposure { who, value });
-			total_exposure += value;
-		}
-
-		mock::start_active_era(1);
-		// reward validator for current era
-		Pallet::<Test>::reward_by_ids(vec![(11, 1)]);
-
-		// start new era
-		mock::start_active_era(2);
-		// verify exposure for era 1 is stored in paged storage, that each exposure is stored in
-		// one and only one page, and no exposure is repeated.
-		let actual_exposure_page_0 = ErasStakersPaged::<Test>::get((1, 11, 0)).unwrap();
-		let actual_exposure_page_1 = ErasStakersPaged::<Test>::get((1, 11, 1)).unwrap();
-		expected_individual_exposures.iter().for_each(|exposure| {
-			assert!(
-				actual_exposure_page_0.others.contains(exposure) ||
-					actual_exposure_page_1.others.contains(exposure)
-			);
-		});
-		assert_eq!(
-			expected_individual_exposures.len(),
-			actual_exposure_page_0.others.len() + actual_exposure_page_1.others.len()
-		);
-		// verify `EraInfo` returns page from paged storage
-		assert_eq!(
-			EraInfo::<Test>::get_paged_exposure(1, &11, 0).unwrap().others(),
-			&actual_exposure_page_0.others
-		);
-		assert_eq!(
-			EraInfo::<Test>::get_paged_exposure(1, &11, 1).unwrap().others(),
-			&actual_exposure_page_1.others
-		);
-		assert_eq!(EraInfo::<Test>::get_page_count(1, &11), 2);
-
-		// validator is exposed
-		assert!(<Staking as sp_staking::StakingInterface>::is_exposed_in_era(&11, &1));
-		// nominators are exposed
-		for i in 10..15 {
-			let who: AccountId = 1000 + i;
-			assert!(<Staking as sp_staking::StakingInterface>::is_exposed_in_era(&who, &1));
-		}
-
-		// case 2: exposure exist in ErasStakers and ErasStakersClipped (legacy).
-		// delete paged storage and add exposure to clipped storage
-		<ErasStakersPaged<Test>>::remove((1, 11, 0));
-		<ErasStakersPaged<Test>>::remove((1, 11, 1));
-		<ErasStakersOverview<Test>>::remove(1, 11);
-
-		<ErasStakers<Test>>::insert(
-			1,
-			11,
-			Exposure {
-				total: total_exposure,
-				own: 1000,
-				others: expected_individual_exposures.clone(),
-			},
-		);
-		let mut clipped_exposure = expected_individual_exposures.clone();
-		clipped_exposure.sort_by(|a, b| b.who.cmp(&a.who));
-		clipped_exposure.truncate(10);
-		<ErasStakersClipped<Test>>::insert(
-			1,
-			11,
-			Exposure { total: total_exposure, own: 1000, others: clipped_exposure.clone() },
-		);
-
-		// verify `EraInfo` returns exposure from clipped storage
-		let actual_exposure_paged = EraInfo::<Test>::get_paged_exposure(1, &11, 0).unwrap();
-		assert_eq!(actual_exposure_paged.others(), &clipped_exposure);
-		assert_eq!(actual_exposure_paged.own(), 1000);
-		assert_eq!(actual_exposure_paged.exposure_metadata.page_count, 1);
-
-		let actual_exposure_full = EraInfo::<Test>::get_full_exposure(1, &11);
-		assert_eq!(actual_exposure_full.others, expected_individual_exposures);
-		assert_eq!(actual_exposure_full.own, 1000);
-		assert_eq!(actual_exposure_full.total, total_exposure);
-
-		// validator is exposed
-		assert!(<Staking as sp_staking::StakingInterface>::is_exposed_in_era(&11, &1));
-		// nominators are exposed
-		for i in 10..15 {
-			let who: AccountId = 1000 + i;
-			assert!(<Staking as sp_staking::StakingInterface>::is_exposed_in_era(&who, &1));
-		}
-
-		// for pages other than 0, clipped storage returns empty exposure
-		assert_eq!(EraInfo::<Test>::get_paged_exposure(1, &11, 1), None);
-		// page size is 1 for clipped storage
-		assert_eq!(EraInfo::<Test>::get_page_count(1, &11), 1);
-
-		// payout for page 0 works
-		assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 0, 0));
-		// payout for page 1 fails
-		assert_noop!(
-			Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 0, 1),
-			Error::<Test>::InvalidPage
-				.with_weight(<Test as Config>::WeightInfo::payout_stakers_alive_staked(0))
-		);
-	});
-}
-
 #[test]
 fn test_runtime_api_pending_rewards() {
 	ExtBuilder::default().build_and_execute(|| {
@@ -7115,70 +6891,36 @@ fn test_runtime_api_pending_rewards() {
 			others: individual_exposures,
 		};
 
-		// add non-paged exposure for one and two.
-		<ErasStakers<Test>>::insert(0, validator_one, exposure.clone());
-		<ErasStakers<Test>>::insert(0, validator_two, exposure.clone());
-		// add paged exposure for third validator
-		EraInfo::<Test>::set_exposure(0, &validator_three, exposure);
+		// add exposure for validators
+		EraInfo::<Test>::upsert_exposure(0, &validator_one, exposure.clone());
+		EraInfo::<Test>::upsert_exposure(0, &validator_two, exposure.clone());
 
 		// add some reward to be distributed
 		ErasValidatorReward::<Test>::insert(0, 1000);
 
-		// mark rewards claimed for validator_one in legacy claimed rewards
-		<Ledger<Test>>::insert(
-			validator_one,
-			StakingLedgerInspect {
-				stash: validator_one,
-				total: stake,
-				active: stake,
-				unlocking: Default::default(),
-				legacy_claimed_rewards: bounded_vec![0],
-			},
-		);
-
-		// SCENARIO ONE: rewards already marked claimed in legacy storage.
-		// runtime api should return false for pending rewards for validator_one.
+		// SCENARIO: Validator with paged exposure (two pages).
+		// validators have not claimed rewards, so pending rewards is true.
+		assert!(EraInfo::<Test>::pending_rewards(0, &validator_one));
+		assert!(EraInfo::<Test>::pending_rewards(0, &validator_two));
+		// and payout works
+		assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_one, 0));
+		assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_two, 0));
+		// validators have two pages of exposure, so pending rewards is still true.
+		assert!(EraInfo::<Test>::pending_rewards(0, &validator_one));
+		assert!(EraInfo::<Test>::pending_rewards(0, &validator_two));
+		// payout again only for validator one
+		assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_one, 0));
+		// now pending rewards is false for validator one
 		assert!(!EraInfo::<Test>::pending_rewards(0, &validator_one));
-		// and if we try to pay, we get an error.
+		// and payout fails for validator one
 		assert_noop!(
 			Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_one, 0),
 			Error::<Test>::AlreadyClaimed.with_weight(err_weight)
 		);
-
-		// SCENARIO TWO: non-paged exposure
-		// validator two has not claimed rewards, so pending rewards is true.
+		// while pending reward is true for validator two
 		assert!(EraInfo::<Test>::pending_rewards(0, &validator_two));
-		// and payout works
+		// and payout works again for validator two.
 		assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_two, 0));
-		// now pending rewards is false.
-		assert!(!EraInfo::<Test>::pending_rewards(0, &validator_two));
-		// and payout fails
-		assert_noop!(
-			Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_two, 0),
-			Error::<Test>::AlreadyClaimed.with_weight(err_weight)
-		);
-
-		// SCENARIO THREE: validator with paged exposure (two pages).
-		// validator three has not claimed rewards, so pending rewards is true.
-		assert!(EraInfo::<Test>::pending_rewards(0, &validator_three));
-		// and payout works
-		assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_three, 0));
-		// validator three has two pages of exposure, so pending rewards is still true.
-		assert!(EraInfo::<Test>::pending_rewards(0, &validator_three));
-		// payout again
-		assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_three, 0));
-		// now pending rewards is false.
-		assert!(!EraInfo::<Test>::pending_rewards(0, &validator_three));
-		// and payout fails
-		assert_noop!(
-			Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_three, 0),
-			Error::<Test>::AlreadyClaimed.with_weight(err_weight)
-		);
-
-		// for eras with no exposure, pending rewards is false.
-		assert!(!EraInfo::<Test>::pending_rewards(0, &validator_one));
-		assert!(!EraInfo::<Test>::pending_rewards(0, &validator_two));
-		assert!(!EraInfo::<Test>::pending_rewards(0, &validator_three));
 	});
 }
 
@@ -7632,6 +7374,7 @@ mod staking_unchecked {
 		})
 	}
 }
+
 mod ledger {
 	use super::*;
 
@@ -8805,6 +8548,7 @@ fn reenable_lower_offenders_mock() {
 			assert_eq!(
 				staking_events_since_last_call(),
 				vec![
+					Event::PagedElectionProceeded { page: 0, result: Ok(7) },
 					Event::StakersElected,
 					Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 },
 					Event::SlashReported {
@@ -8881,6 +8625,7 @@ fn do_not_reenable_higher_offenders_mock() {
 			assert_eq!(
 				staking_events_since_last_call(),
 				vec![
+					Event::PagedElectionProceeded { page: 0, result: Ok(7) },
 					Event::StakersElected,
 					Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 },
 					Event::SlashReported {
@@ -8946,12 +8691,12 @@ mod getters {
 		slashing,
 		tests::{Staking, Test},
 		ActiveEra, ActiveEraInfo, BalanceOf, CanceledSlashPayout, ClaimedRewards, CurrentEra,
-		CurrentPlannedSession, EraRewardPoints, ErasRewardPoints, ErasStakersClipped,
-		ErasStartSessionIndex, ErasTotalStake, ErasValidatorPrefs, ErasValidatorReward, ForceEra,
-		Forcing, Nominations, Nominators, Perbill, SlashRewardFraction, SlashingSpans,
-		ValidatorPrefs, Validators,
+		CurrentPlannedSession, EraRewardPoints, ErasRewardPoints, ErasStartSessionIndex,
+		ErasTotalStake, ErasValidatorPrefs, ErasValidatorReward, ForceEra, Forcing, Nominations,
+		Nominators, Perbill, SlashRewardFraction, SlashingSpans, ValidatorPrefs, Validators,
 	};
-	use sp_staking::{EraIndex, Exposure, IndividualExposure, Page, SessionIndex};
+	use frame_support::BoundedVec;
+	use sp_staking::{EraIndex, Page, SessionIndex};
 
 	#[test]
 	fn get_validator_count_returns_value_from_storage() {
@@ -8988,7 +8733,9 @@ mod getters {
 		sp_io::TestExternalities::default().execute_with(|| {
 			// given
 			let v: Vec<mock::AccountId> = vec![1, 2, 3];
-			Invulnerables::<Test>::put(v.clone());
+			Invulnerables::<Test>::put(
+				BoundedVec::try_from(v.clone()).expect("Too many invulnerable validators!"),
+			);
 
 			// when
 			let result = Staking::invulnerables();
@@ -9087,27 +8834,6 @@ mod getters {
 		});
 	}
 
-	#[test]
-	fn get_eras_stakers_clipped_returns_value_from_storage() {
-		sp_io::TestExternalities::default().execute_with(|| {
-			// given
-			let era: EraIndex = 12;
-			let account_id: mock::AccountId = 1;
-			let exposure: Exposure<mock::AccountId, BalanceOf<Test>> = Exposure {
-				total: 1125,
-				own: 1000,
-				others: vec![IndividualExposure { who: 101, value: 125 }],
-			};
-			ErasStakersClipped::<Test>::insert(era, account_id, exposure.clone());
-
-			// when
-			let result = Staking::eras_stakers_clipped(era, &account_id);
-
-			// then
-			assert_eq!(result, exposure);
-		});
-	}
-
 	#[test]
 	fn get_claimed_rewards_returns_value_from_storage() {
 		sp_io::TestExternalities::default().execute_with(|| {
diff --git a/substrate/frame/staking/src/tests_paged_election.rs b/substrate/frame/staking/src/tests_paged_election.rs
new file mode 100644
index 00000000000..76be6819d11
--- /dev/null
+++ b/substrate/frame/staking/src/tests_paged_election.rs
@@ -0,0 +1,971 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::{mock::*, *};
+use frame_support::{assert_ok, testing_prelude::*};
+use substrate_test_utils::assert_eq_uvec;
+
+use frame_election_provider_support::{
+	bounds::ElectionBoundsBuilder, ElectionDataProvider, SortedListProvider, Support,
+};
+use sp_staking::StakingInterface;
+
+mod electable_stashes {
+	use super::*;
+
+	#[test]
+	fn add_electable_stashes_work() {
+		ExtBuilder::default().try_state(false).build_and_execute(|| {
+			MaxValidatorSet::set(5);
+			assert_eq!(MaxValidatorSet::get(), 5);
+			assert!(ElectableStashes::<Test>::get().is_empty());
+
+			// adds stashes without duplicates, do not overflow bounds.
+			assert_ok!(Staking::add_electables(vec![1u64, 2, 3].into_iter()));
+			assert_eq!(
+				ElectableStashes::<Test>::get().into_inner().into_iter().collect::<Vec<_>>(),
+				vec![1, 2, 3]
+			);
+
+			// adds with duplicates which are deduplicated implicitly, do not overflow bounds.
+			assert_ok!(Staking::add_electables(vec![1u64, 2, 4].into_iter()));
+			assert_eq!(
+				ElectableStashes::<Test>::get().into_inner().into_iter().collect::<Vec<_>>(),
+				vec![1, 2, 3, 4]
+			);
+		})
+	}
+
+	#[test]
+	fn add_electable_stashes_overflow_works() {
+		ExtBuilder::default().try_state(false).build_and_execute(|| {
+			MaxValidatorSet::set(5);
+			assert_eq!(MaxValidatorSet::get(), 5);
+			assert!(ElectableStashes::<Test>::get().is_empty());
+
+			// adds stashes so that bounds are overflown, fails and internal state changes so that
+			// all slots are filled. error will return the idx of the first account that was not
+			// included.
+			let expected_idx_not_included = 5; // stash 6.
+			assert_eq!(
+				Staking::add_electables(vec![1u64, 2, 3, 4, 5, 6, 7, 8].into_iter()),
+				Err(expected_idx_not_included)
+			);
+			// the included were added to the electable stashes, despite the error.
+			assert_eq!(
+				ElectableStashes::<Test>::get().into_inner().into_iter().collect::<Vec<_>>(),
+				vec![1, 2, 3, 4, 5]
+			);
+		})
+	}
+
+	#[test]
+	fn overflow_electable_stashes_no_exposures_work() {
+		// ensures exposures are stored only for the electable stashes that fit within the
+		// electable stashes bounds in case of overflow.
+		ExtBuilder::default().try_state(false).build_and_execute(|| {
+			MaxValidatorSet::set(2);
+			assert_eq!(MaxValidatorSet::get(), 2);
+			assert!(ElectableStashes::<Test>::get().is_empty());
+
+			// current era is 0, preparing 1.
+			assert_eq!(current_era(), 0);
+
+			let supports = to_bounded_supports(vec![
+				(1, Support { total: 100, voters: vec![(10, 1_000)] }),
+				(2, Support { total: 200, voters: vec![(20, 2_000)] }),
+				(3, Support { total: 300, voters: vec![(30, 3_000)] }),
+				(4, Support { total: 400, voters: vec![(40, 4_000)] }),
+			]);
+
+			// error due to bounds.
+			let expected_not_included = 2;
+			assert_eq!(Staking::do_elect_paged_inner(supports), Err(expected_not_included));
+
+			// electable stashes have been collected to the max bounds despite the error.
+			assert_eq!(ElectableStashes::<Test>::get().into_iter().collect::<Vec<_>>(), vec![1, 2]);
+
+			let exposure_exists =
+				|acc, era| EraInfo::<Test>::get_full_exposure(era, &acc).total != 0;
+
+			// exposures were only collected for electable stashes in bounds (1 and 2).
+			assert!(exposure_exists(1, 1));
+			assert!(exposure_exists(2, 1));
+			assert!(!exposure_exists(3, 1));
+			assert!(!exposure_exists(4, 1));
+		})
+	}
+}
+
+mod paged_on_initialize {
+	use super::*;
+	use frame_election_provider_support::onchain;
+
+	#[test]
+	fn single_page_election_works() {
+		ExtBuilder::default()
+			// set desired targets to 3.
+			.validator_count(3)
+			.build_and_execute(|| {
+				let next_election = Staking::next_election_prediction(System::block_number());
+				assert_eq!(next_election, 10);
+
+				// single page.
+				let pages: BlockNumber = Staking::election_pages().into();
+				assert_eq!(pages, 1);
+
+				// genesis validators are now in place.
+				assert_eq!(current_era(), 0);
+				assert_eq_uvec!(Session::validators(), vec![11, 21, 31]);
+
+				// force unstake of 31 to ensure the election results of the next era are
+				// different than genesis.
+				assert_ok!(Staking::force_unstake(RuntimeOrigin::root(), 31, 0));
+
+				let expected_elected = Validators::<Test>::iter_keys()
+					.filter(|x| Staking::status(x) == Ok(StakerStatus::Validator))
+					.collect::<Vec<AccountId>>();
+				//  use all registered validators as potential targets.
+				ValidatorCount::<Test>::set(expected_elected.len() as u32);
+				assert_eq!(expected_elected.len(), 2);
+
+				// 1. election prep hasn't started yet, election cursor and electable stashes are
+				// not set yet.
+				run_to_block(8);
+				assert_eq!(NextElectionPage::<Test>::get(), None);
+				assert!(ElectableStashes::<Test>::get().is_empty());
+				assert_eq!(VoterSnapshotStatus::<Test>::get(), SnapshotStatus::Waiting);
+
+				// try-state sanity check.
+				assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+
+				// 2. starts preparing election at the (election_prediction - n_pages) block.
+				run_to_block(9);
+				assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+
+				// electing started, but since single-page, we don't set `NextElectionPage` at all.
+				assert_eq!(NextElectionPage::<Test>::get(), None);
+				// now the electable stashes have been fetched and stored.
+				assert_eq_uvec!(
+					ElectableStashes::<Test>::get().into_iter().collect::<Vec<_>>(),
+					expected_elected
+				);
+				assert_eq!(VoterSnapshotStatus::<Test>::get(), SnapshotStatus::Waiting);
+
+				// era is still 0.
+				assert_eq!(current_era(), 0);
+
+				// 3. progress to election block, which matches with era rotation.
+				run_to_block(10);
+				assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+				assert_eq!(current_era(), 1);
+				// clears out election metadata for era.
+				assert!(NextElectionPage::<Test>::get().is_none());
+				assert!(ElectableStashes::<Test>::get().into_iter().collect::<Vec<_>>().is_empty());
+				assert_eq!(VoterSnapshotStatus::<Test>::get(), SnapshotStatus::Waiting);
+
+				// era progressed and electable stashes have been served to session pallet.
+				assert_eq_uvec!(Session::validators(), vec![11, 21, 31]);
+
+				// 4. in the next era, the validator set does not include 31 anymore which was
+				// unstaked.
+				start_active_era(2);
+				assert_eq_uvec!(Session::validators(), vec![11, 21]);
+			})
+	}
+
+	#[test]
+	fn single_page_election_era_transition_exposures_work() {
+		ExtBuilder::default()
+			// set desired targets to 3.
+			.validator_count(3)
+			.build_and_execute(|| {
+				assert_eq!(current_era(), 0);
+
+				// 3 sessions per era.
+				assert_eq!(SessionsPerEra::get(), 3);
+
+				// genesis validators and exposures.
+				assert_eq!(current_era(), 0);
+				assert_eq_uvec!(validator_controllers(), vec![11, 21, 31]);
+				assert_eq!(
+					era_exposures(current_era()),
+					vec![
+						(
+							11,
+							Exposure {
+								total: 1125,
+								own: 1000,
+								others: vec![IndividualExposure { who: 101, value: 125 }]
+							}
+						),
+						(
+							21,
+							Exposure {
+								total: 1375,
+								own: 1000,
+								others: vec![IndividualExposure { who: 101, value: 375 }]
+							}
+						),
+						(31, Exposure { total: 500, own: 500, others: vec![] })
+					]
+				);
+
+				// try-state sanity check.
+				assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+
+				start_session(1);
+				assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+				assert_eq!(current_era(), 0);
+				// election hasn't started yet.
+				assert_eq!(NextElectionPage::<Test>::get(), None);
+				assert!(ElectableStashes::<Test>::get().is_empty());
+
+				// progress to era rotation session.
+				start_session(SessionsPerEra::get());
+				assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+				assert_eq!(current_era(), 1);
+				assert_eq_uvec!(Session::validators(), vec![11, 21, 31]);
+				assert_eq!(
+					era_exposures(current_era()),
+					vec![
+						(
+							11,
+							Exposure {
+								total: 1125,
+								own: 1000,
+								others: vec![IndividualExposure { who: 101, value: 125 }]
+							}
+						),
+						(
+							21,
+							Exposure {
+								total: 1375,
+								own: 1000,
+								others: vec![IndividualExposure { who: 101, value: 375 }]
+							}
+						),
+						(31, Exposure { total: 500, own: 500, others: vec![] })
+					]
+				);
+
+				// force unstake validator 31 for next era.
+				assert_ok!(Staking::force_unstake(RuntimeOrigin::root(), 31, 0));
+
+				// progress session and rotate era.
+				start_session(SessionsPerEra::get() * 2);
+				assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+				assert_eq!(current_era(), 2);
+				assert_eq_uvec!(Session::validators(), vec![11, 21]);
+
+				assert_eq!(
+					era_exposures(current_era()),
+					vec![
+						(
+							11,
+							Exposure {
+								total: 1125,
+								own: 1000,
+								others: vec![IndividualExposure { who: 101, value: 125 }]
+							}
+						),
+						(
+							21,
+							Exposure {
+								total: 1375,
+								own: 1000,
+								others: vec![IndividualExposure { who: 101, value: 375 }]
+							}
+						),
+					]
+				);
+
+				assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+			})
+	}
+
+	#[test]
+	fn multi_page_election_works() {
+		ExtBuilder::default()
+			.add_staker(61, 61, 1000, StakerStatus::Validator)
+			.add_staker(71, 71, 1000, StakerStatus::Validator)
+			.add_staker(81, 81, 1000, StakerStatus::Validator)
+			.add_staker(91, 91, 1000, StakerStatus::Validator)
+			.multi_page_election_provider(3)
+			.max_winners_per_page(5)
+			.build_and_execute(|| {
+				// we need this later.
+				let genesis_validators = Session::validators();
+
+				// election provider has 3 pages.
+				let pages: BlockNumber =
+					<<Test as Config>::ElectionProvider as ElectionProvider>::Pages::get().into();
+				assert_eq!(pages, 3);
+
+                // 5 max winners per page.
+                let max_winners_page = <<Test as Config>::ElectionProvider as ElectionProvider>::MaxWinnersPerPage::get();
+                assert_eq!(max_winners_page, 5);
+
+                // genesis era.
+				assert_eq!(current_era(), 0);
+
+				// confirm the genesis validators.
+				assert_eq!(Session::validators(), vec![11, 21]);
+
+				let next_election = <Staking as ElectionDataProvider>::next_election_prediction(
+					System::block_number(),
+				);
+				assert_eq!(next_election, 10);
+
+				let expected_elected = Validators::<Test>::iter_keys()
+					.filter(|x| Staking::status(x) == Ok(StakerStatus::Validator))
+					// mock multi page election provider takes first `max_winners_page`
+					// validators as winners.
+					.take(max_winners_page as usize)
+					.collect::<Vec<AccountId>>();
+				// adjust desired targets to number of winners per page.
+				ValidatorCount::<Test>::set(expected_elected.len() as u32);
+				assert_eq!(expected_elected.len(), 5);
+
+				// try-state sanity check.
+				assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+
+				// 1. election prep hasn't started yet, election cursor and electable stashes are
+				// not set yet.
+				run_to_block(6);
+				assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+				assert_eq!(NextElectionPage::<Test>::get(), None);
+				assert!(ElectableStashes::<Test>::get().is_empty());
+
+				// 2. starts preparing election at the (election_prediction - n_pages) block.
+				//  fetches msp (i.e. 2).
+				run_to_block(7);
+				assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+
+				// electing started at cursor is set once the election starts to be prepared.
+				assert_eq!(NextElectionPage::<Test>::get(), Some(1));
+				// now the electable stashes started to be fetched and stored.
+				assert_eq_uvec!(
+					ElectableStashes::<Test>::get().into_iter().collect::<Vec<_>>(),
+					expected_elected
+				);
+				// exposures have been collected for all validators in the page.
+				// note that the mock election provider adds one exposures per winner for
+				// each page.
+				for s in expected_elected.iter() {
+					// 1 page fetched, 1 `other` exposure collected per electable stash.
+					assert_eq!(Staking::eras_stakers(current_era() + 1, s).others.len(), 1);
+				}
+
+				// 3. progress one block to fetch page 1.
+				run_to_block(8);
+				assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+				// the electable stashes remain the same.
+				assert_eq_uvec!(
+					ElectableStashes::<Test>::get().into_iter().collect::<Vec<_>>(),
+					expected_elected
+				);
+				// election cursor moves along.
+				assert_eq!(NextElectionPage::<Test>::get(), Some(0));
+				// exposures have been collected for all validators in the page.
+				for s in expected_elected.iter() {
+					// 2 pages fetched, 2 `other` exposures collected per electable stash.
+					assert_eq!(Staking::eras_stakers(current_era() + 1, s).others.len(), 2);
+				}
+
+				// 4. progress one block to fetch lsp (i.e. 0).
+				run_to_block(9);
+				assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+				// the electable stashes remain the same.
+				assert_eq_uvec!(
+					ElectableStashes::<Test>::get().into_iter().collect::<Vec<_>>(),
+					expected_elected
+				);
+				// exposures have been collected for all validators in the page.
+				for s in expected_elected.iter() {
+					// 3 pages fetched, 3 `other` exposures collected per electable stash.
+					assert_eq!(Staking::eras_stakers(current_era() + 1, s).others.len(), 3);
+				}
+				assert_eq!(NextElectionPage::<Test>::get(), None);
+				assert_eq!(staking_events_since_last_call(), vec![
+					Event::PagedElectionProceeded { page: 2, result: Ok(5) },
+					Event::PagedElectionProceeded { page: 1, result: Ok(0) },
+					Event::PagedElectionProceeded { page: 0, result: Ok(0) }
+				]);
+
+				// upon fetching page 0, the electing started will remain in storage until the
+				// era rotates.
+				assert_eq!(current_era(), 0);
+
+				// Next block the era will rotate.
+				run_to_block(10);
+				assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+				// and all the metadata has been cleared up and ready for the next election.
+				assert!(NextElectionPage::<Test>::get().is_none());
+				assert!(ElectableStashes::<Test>::get().is_empty());
+				// events
+				assert_eq!(staking_events_since_last_call(), vec![
+					Event::StakersElected
+				]);
+				// session validators are not updated yet, these are genesis validators
+				assert_eq_uvec!(Session::validators(),  genesis_validators);
+
+				// next session they are updated.
+				advance_session();
+				// the new era validators are the expected elected stashes.
+				assert_eq_uvec!(Session::validators(), expected_elected);
+				assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+
+			})
+	}
+
+	#[test]
+	fn multi_page_election_with_mulit_page_exposures_rewards_work() {
+		ExtBuilder::default()
+			.add_staker(61, 61, 1000, StakerStatus::Validator)
+			.add_staker(71, 71, 1000, StakerStatus::Validator)
+            .add_staker(1, 1, 5, StakerStatus::Nominator(vec![21, 31, 71]))
+            .add_staker(2, 2, 5, StakerStatus::Nominator(vec![21, 31, 71]))
+            .add_staker(3, 3, 5, StakerStatus::Nominator(vec![21, 31, 71]))
+			.multi_page_election_provider(3)
+            .max_winners_per_page(3)
+            .exposures_page_size(2)
+			.build_and_execute(|| {
+				// election provider has 3 pages.
+				let pages: BlockNumber =
+					<<Test as Config>::ElectionProvider as ElectionProvider>::Pages::get().into();
+				assert_eq!(pages, 3);
+                // 3 max winners per page.
+                let max_winners_page = <<Test as Config>::ElectionProvider as ElectionProvider>::MaxWinnersPerPage::get();
+                assert_eq!(max_winners_page, 3);
+
+        		// setup validator payee prefs and 10% commission.
+                for s in vec![21, 31, 71] {
+        		    Payee::<Test>::insert(s, RewardDestination::Account(s));
+                    let prefs = ValidatorPrefs { commission: Perbill::from_percent(10), ..Default::default() };
+			        Validators::<Test>::insert(s, prefs.clone());
+                }
+
+                let init_balance_all = vec![21, 31, 71, 1, 2, 3].iter().fold(0, |mut acc, s| {
+                    acc += asset::total_balance::<Test>(&s);
+                    acc
+                });
+
+                // progress era.
+				assert_eq!(current_era(), 0);
+                start_active_era(1);
+				assert_eq!(current_era(), 1);
+                assert_eq!(Session::validators(), vec![21, 31, 71]);
+
+                // distribute reward,
+		        Pallet::<Test>::reward_by_ids(vec![(21, 50)]);
+		        Pallet::<Test>::reward_by_ids(vec![(31, 50)]);
+		        Pallet::<Test>::reward_by_ids(vec![(71, 50)]);
+
+        		let total_payout = current_total_payout_for_duration(reward_time_per_era());
+
+                start_active_era(2);
+
+                // all the validators exposed in era 1 have two pages of exposures, since exposure
+                // page size is 2.
+                assert_eq!(MaxExposurePageSize::get(), 2);
+                assert_eq!(EraInfo::<Test>::get_page_count(1, &21), 2);
+                assert_eq!(EraInfo::<Test>::get_page_count(1, &31), 2);
+                assert_eq!(EraInfo::<Test>::get_page_count(1, &71), 2);
+
+                make_all_reward_payment(1);
+
+                let balance_all = vec![21, 31, 71, 1, 2, 3].iter().fold(0, |mut acc, s| {
+                    acc += asset::total_balance::<Test>(&s);
+                    acc
+                });
+
+			    assert_eq_error_rate!(
+                    total_payout,
+                    balance_all - init_balance_all,
+                    4
+                );
+            })
+	}
+
+	#[test]
+	fn multi_page_election_is_graceful() {
+		// demonstrate that in a multi-page election, if some of the `elect(_)` calls fail, we won't
+		// bail right away.
+		ExtBuilder::default().multi_page_election_provider(3).build_and_execute(|| {
+			// load some exact data into the election provider, some of which are error or empty.
+			let correct_results = <Test as Config>::GenesisElectionProvider::elect(0);
+			CustomElectionSupports::set(Some(vec![
+				// page 0.
+				correct_results.clone(),
+				// page 1.
+				Err(onchain::Error::FailedToBound),
+				// page 2.
+				Ok(Default::default()),
+			]));
+
+			// genesis era.
+			assert_eq!(current_era(), 0);
+
+			let next_election =
+				<Staking as ElectionDataProvider>::next_election_prediction(System::block_number());
+			assert_eq!(next_election, 10);
+
+			// try-state sanity check.
+			assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+
+			// 1. election prep hasn't started yet, election cursor and electable stashes are
+			// not set yet.
+			run_to_block(6);
+			assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+			assert_eq!(NextElectionPage::<Test>::get(), None);
+			assert!(ElectableStashes::<Test>::get().is_empty());
+
+			// 2. starts preparing election at the (election_prediction - n_pages) block.
+			//  fetches msp (i.e. 2).
+			run_to_block(7);
+			assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+
+			// electing started at cursor is set once the election starts to be prepared.
+			assert_eq!(NextElectionPage::<Test>::get(), Some(1));
+			// in elect(2) we won't collect any stashes yet.
+			assert!(ElectableStashes::<Test>::get().is_empty());
+
+			// 3. progress one block to fetch page 1.
+			run_to_block(8);
+			assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+
+			// in elect(1) we won't collect any stashes yet.
+			assert!(ElectableStashes::<Test>::get().is_empty());
+			// election cursor is updated
+			assert_eq!(NextElectionPage::<Test>::get(), Some(0));
+
+			// 4. progress one block to fetch lsp (i.e. 0).
+			run_to_block(9);
+			assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+
+			// some stashes come in.
+			assert_eq!(
+				ElectableStashes::<Test>::get().into_iter().collect::<Vec<_>>(),
+				vec![11 as AccountId, 21]
+			);
+			// cursor is now none
+			assert_eq!(NextElectionPage::<Test>::get(), None);
+
+			// events thus far
+			assert_eq!(
+				staking_events_since_last_call(),
+				vec![
+					Event::PagedElectionProceeded { page: 2, result: Ok(0) },
+					Event::PagedElectionProceeded { page: 1, result: Err(0) },
+					Event::PagedElectionProceeded { page: 0, result: Ok(2) }
+				]
+			);
+
+			// upon fetching page 0, the electing started will remain in storage until the
+			// era rotates.
+			assert_eq!(current_era(), 0);
+
+			// Next block the era will rotate.
+			run_to_block(10);
+			assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+
+			// and all the metadata has been cleared up and ready for the next election.
+			assert!(NextElectionPage::<Test>::get().is_none());
+			assert!(ElectableStashes::<Test>::get().is_empty());
+
+			// and the overall staking worked fine.
+			assert_eq!(staking_events_since_last_call(), vec![Event::StakersElected]);
+		})
+	}
+
+	#[test]
+	fn multi_page_election_fails_if_not_enough_validators() {
+		// a graceful multi-page election still fails if not enough validators are provided.
+		ExtBuilder::default()
+			.multi_page_election_provider(3)
+			.minimum_validator_count(3)
+			.build_and_execute(|| {
+				// load some exact data into the election provider, some of which are error or
+				// empty.
+				let correct_results = <Test as Config>::GenesisElectionProvider::elect(0);
+				CustomElectionSupports::set(Some(vec![
+					// page 0.
+					correct_results.clone(),
+					// page 1.
+					Err(onchain::Error::FailedToBound),
+					// page 2.
+					Ok(Default::default()),
+				]));
+
+				// genesis era.
+				assert_eq!(current_era(), 0);
+
+				let next_election = <Staking as ElectionDataProvider>::next_election_prediction(
+					System::block_number(),
+				);
+				assert_eq!(next_election, 10);
+
+				// try-state sanity check.
+				assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+
+				// 1. election prep hasn't started yet, election cursor and electable stashes are
+				// not set yet.
+				run_to_block(6);
+				assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+				assert_eq!(NextElectionPage::<Test>::get(), None);
+				assert!(ElectableStashes::<Test>::get().is_empty());
+
+				// 2. starts preparing election at the (election_prediction - n_pages) block.
+				//  fetches msp (i.e. 2).
+				run_to_block(7);
+				assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+
+				// electing started at cursor is set once the election starts to be prepared.
+				assert_eq!(NextElectionPage::<Test>::get(), Some(1));
+				// in elect(2) we won't collect any stashes yet.
+				assert!(ElectableStashes::<Test>::get().is_empty());
+
+				// 3. progress one block to fetch page 1.
+				run_to_block(8);
+				assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+
+				// in elect(1) we won't collect any stashes yet.
+				assert!(ElectableStashes::<Test>::get().is_empty());
+				// election cursor is updated
+				assert_eq!(NextElectionPage::<Test>::get(), Some(0));
+
+				// 4. progress one block to fetch lsp (i.e. 0).
+				run_to_block(9);
+				assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+
+				// some stashes come in.
+				assert_eq!(
+					ElectableStashes::<Test>::get().into_iter().collect::<Vec<_>>(),
+					vec![11 as AccountId, 21]
+				);
+				// cursor is now none
+				assert_eq!(NextElectionPage::<Test>::get(), None);
+
+				// events thus far
+				assert_eq!(
+					staking_events_since_last_call(),
+					vec![
+						Event::PagedElectionProceeded { page: 2, result: Ok(0) },
+						Event::PagedElectionProceeded { page: 1, result: Err(0) },
+						Event::PagedElectionProceeded { page: 0, result: Ok(2) }
+					]
+				);
+
+				// upon fetching page 0, the election metadata remains in storage until the
+				// era rotates.
+				assert_eq!(current_era(), 0);
+
+				// Next block the era will rotate.
+				run_to_block(10);
+				assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number()));
+
+				// and all the metadata has been cleared up and ready for the next election.
+				assert!(NextElectionPage::<Test>::get().is_none());
+				assert!(ElectableStashes::<Test>::get().is_empty());
+
+				// and the overall staking worked fine.
+				assert_eq!(staking_events_since_last_call(), vec![Event::StakingElectionFailed]);
+			})
+	}
+}
+
+mod paged_snapshot {
+	use super::*;
+
+	#[test]
+	fn target_snapshot_works() {
+		ExtBuilder::default()
+			.nominate(true)
+			.set_status(41, StakerStatus::Validator)
+			.set_status(51, StakerStatus::Validator)
+			.set_status(101, StakerStatus::Idle)
+			.build_and_execute(|| {
+				// all registered validators.
+				let all_targets = vec![51, 31, 41, 21, 11];
+				assert_eq_uvec!(
+					<Test as Config>::TargetList::iter().collect::<Vec<_>>(),
+					all_targets,
+				);
+
+				// 3 targets per page.
+				let bounds =
+					ElectionBoundsBuilder::default().targets_count(3.into()).build().targets;
+
+				let targets =
+					<Staking as ElectionDataProvider>::electable_targets(bounds, 0).unwrap();
+				assert_eq_uvec!(targets, all_targets.iter().take(3).cloned().collect::<Vec<_>>());
+
+				// emulates a no bounds target snapshot request.
+				let bounds =
+					ElectionBoundsBuilder::default().targets_count(u32::MAX.into()).build().targets;
+
+				let single_page_targets =
+					<Staking as ElectionDataProvider>::electable_targets(bounds, 0).unwrap();
+
+				// complete set of paged targets is the same as single page, no bounds set of
+				// targets.
+				assert_eq_uvec!(all_targets, single_page_targets);
+			})
+	}
+
+	#[test]
+	fn target_snapshot_multi_page_redundant() {
+		ExtBuilder::default().build_and_execute(|| {
+			let all_targets = vec![31, 21, 11];
+			assert_eq_uvec!(<Test as Config>::TargetList::iter().collect::<Vec<_>>(), all_targets,);
+
+			// no bounds.
+			let bounds =
+				ElectionBoundsBuilder::default().targets_count(u32::MAX.into()).build().targets;
+
+			// the target snapshot supports only a single page, thus the requested page index
+			// is irrelevant.
+			let snapshot = Staking::electable_targets(bounds, 0).unwrap();
+			assert!(
+				snapshot == all_targets &&
+					snapshot == Staking::electable_targets(bounds, 1).unwrap() &&
+					snapshot == Staking::electable_targets(bounds, 2).unwrap() &&
+					snapshot == Staking::electable_targets(bounds, u32::MAX).unwrap(),
+			);
+		})
+	}
+
+	#[test]
+	fn voter_snapshot_works() {
+		ExtBuilder::default()
+			.nominate(true)
+			.set_status(51, StakerStatus::Validator)
+			.set_status(41, StakerStatus::Nominator(vec![51]))
+			.set_status(101, StakerStatus::Validator)
+			.build_and_execute(|| {
+				let bounds = ElectionBoundsBuilder::default().voters_count(3.into()).build().voters;
+				assert_eq!(
+					<Test as Config>::VoterList::iter().collect::<Vec<_>>(),
+					vec![11, 21, 31, 41, 51, 101],
+				);
+
+				let mut all_voters = vec![];
+
+				let voters_page_3 = <Staking as ElectionDataProvider>::electing_voters(bounds, 3)
+					.unwrap()
+					.into_iter()
+					.map(|(a, _, _)| a)
+					.collect::<Vec<_>>();
+				all_voters.extend(voters_page_3.clone());
+
+				assert_eq!(voters_page_3, vec![11, 21, 31]);
+
+				let voters_page_2 = <Staking as ElectionDataProvider>::electing_voters(bounds, 2)
+					.unwrap()
+					.into_iter()
+					.map(|(a, _, _)| a)
+					.collect::<Vec<_>>();
+				all_voters.extend(voters_page_2.clone());
+
+				assert_eq!(voters_page_2, vec![41, 51, 101]);
+
+				// all voters in the list have been consumed.
+				assert_eq!(VoterSnapshotStatus::<Test>::get(), SnapshotStatus::Consumed);
+
+				// thus page 1 and 0 are empty.
+				assert!(<Staking as ElectionDataProvider>::electing_voters(bounds, 1)
+					.unwrap()
+					.is_empty());
+				assert!(<Staking as ElectionDataProvider>::electing_voters(bounds, 0)
+					.unwrap()
+					.is_empty());
+
+				// last page has been requested, reset the snapshot status to waiting.
+				assert_eq!(VoterSnapshotStatus::<Test>::get(), SnapshotStatus::Waiting);
+
+				// now request 1 page with bounds where all registered voters fit. u32::MAX
+				// emulates a no bounds request.
+				let bounds =
+					ElectionBoundsBuilder::default().voters_count(u32::MAX.into()).build().voters;
+
+				let single_page_voters =
+					<Staking as ElectionDataProvider>::electing_voters(bounds, 0)
+						.unwrap()
+						.into_iter()
+						.map(|(a, _, _)| a)
+						.collect::<Vec<_>>();
+
+				// complete set of paged voters is the same as single page, no bounds set of
+				// voters.
+				assert_eq!(all_voters, single_page_voters);
+			})
+	}
+
+	#[test]
+	#[should_panic]
+	fn voter_snapshot_starts_from_msp_to_lsp() {
+		todo!();
+	}
+}
+
+mod paged_exposures {
+	use super::*;
+
+	#[test]
+	fn genesis_collect_exposures_works() {
+		ExtBuilder::default().multi_page_election_provider(3).build_and_execute(|| {
+			// first, clean up all the era data and metadata to mimic a genesis election next.
+			Staking::clear_era_information(current_era());
+
+			// genesis election is single paged.
+			let genesis_result = <<Test as Config>::GenesisElectionProvider>::elect(0u32).unwrap();
+			let expected_exposures = Staking::collect_exposures(genesis_result.clone());
+
+			Staking::try_plan_new_era(0u32, true);
+
+			// expected exposures are stored for the expected genesis validators.
+			for exposure in expected_exposures {
+				assert_eq!(EraInfo::<Test>::get_full_exposure(0, &exposure.0), exposure.1);
+			}
+		})
+	}
+
+	#[test]
+	fn store_stakers_info_elect_works() {
+		ExtBuilder::default().exposures_page_size(2).build_and_execute(|| {
+			assert_eq!(MaxExposurePageSize::get(), 2);
+
+			let exposure_one = Exposure {
+				total: 1000 + 700,
+				own: 1000,
+				others: vec![
+					IndividualExposure { who: 101, value: 500 },
+					IndividualExposure { who: 102, value: 100 },
+					IndividualExposure { who: 103, value: 100 },
+				],
+			};
+
+			let exposure_two = Exposure {
+				total: 1000 + 1000,
+				own: 1000,
+				others: vec![
+					IndividualExposure { who: 104, value: 500 },
+					IndividualExposure { who: 105, value: 500 },
+				],
+			};
+
+			let exposure_three = Exposure {
+				total: 1000 + 500,
+				own: 1000,
+				others: vec![
+					IndividualExposure { who: 110, value: 250 },
+					IndividualExposure { who: 111, value: 250 },
+				],
+			};
+
+			let exposures_page_one = bounded_vec![(1, exposure_one), (2, exposure_two),];
+			let exposures_page_two = bounded_vec![(1, exposure_three),];
+
+			// stores exposure page with exposures of validator 1 and 2, returns exposed validator
+			// account id.
+			assert_eq!(
+				Pallet::<Test>::store_stakers_info(exposures_page_one, current_era()).to_vec(),
+				vec![1, 2]
+			);
+			// Stakers overview OK for validator 1 and 2.
+			assert_eq!(
+				ErasStakersOverview::<Test>::get(0, &1).unwrap(),
+				PagedExposureMetadata { total: 1700, own: 1000, nominator_count: 3, page_count: 2 },
+			);
+			assert_eq!(
+				ErasStakersOverview::<Test>::get(0, &2).unwrap(),
+				PagedExposureMetadata { total: 2000, own: 1000, nominator_count: 2, page_count: 1 },
+			);
+
+			// stores exposure page with exposures of validator 1, returns exposed validator
+			// account id.
+			assert_eq!(
+				Pallet::<Test>::store_stakers_info(exposures_page_two, current_era()).to_vec(),
+				vec![1]
+			);
+
+			// Stakers overview OK for validator 1.
+			assert_eq!(
+				ErasStakersOverview::<Test>::get(0, &1).unwrap(),
+				PagedExposureMetadata { total: 2200, own: 1000, nominator_count: 5, page_count: 3 },
+			);
+
+			// validator 1 has 3 paged exposures.
+			assert!(
+				ErasStakersPaged::<Test>::iter_prefix_values((0, &1)).count() as u32 ==
+					EraInfo::<Test>::get_page_count(0, &1) &&
+					EraInfo::<Test>::get_page_count(0, &1) == 3
+			);
+			assert!(ErasStakersPaged::<Test>::get((0, &1, 0)).is_some());
+			assert!(ErasStakersPaged::<Test>::get((0, &1, 1)).is_some());
+			assert!(ErasStakersPaged::<Test>::get((0, &1, 2)).is_some());
+			assert!(ErasStakersPaged::<Test>::get((0, &1, 3)).is_none());
+
+			// validator 2 has 1 paged exposure.
+			assert!(ErasStakersPaged::<Test>::get((0, &2, 0)).is_some());
+			assert!(ErasStakersPaged::<Test>::get((0, &2, 1)).is_none());
+			assert_eq!(ErasStakersPaged::<Test>::iter_prefix_values((0, &2)).count(), 1);
+
+			// exposures of validator 1 are the expected:
+			assert_eq!(
+				ErasStakersPaged::<Test>::get((0, &1, 0)).unwrap(),
+				ExposurePage {
+					page_total: 600,
+					others: vec![
+						IndividualExposure { who: 101, value: 500 },
+						IndividualExposure { who: 102, value: 100 }
+					]
+				},
+			);
+			assert_eq!(
+				ErasStakersPaged::<Test>::get((0, &1, 1)).unwrap(),
+				ExposurePage {
+					page_total: 350,
+					others: vec![
+						IndividualExposure { who: 103, value: 100 },
+						IndividualExposure { who: 110, value: 250 }
+					]
+				}
+			);
+			assert_eq!(
+				ErasStakersPaged::<Test>::get((0, &1, 2)).unwrap(),
+				ExposurePage {
+					page_total: 250,
+					others: vec![IndividualExposure { who: 111, value: 250 }]
+				}
+			);
+
+			// exposures of validator 2.
+			assert_eq!(
+				ErasStakersPaged::<Test>::iter_prefix_values((0, &2)).collect::<Vec<_>>(),
+				vec![ExposurePage {
+					page_total: 1000,
+					others: vec![
+						IndividualExposure { who: 104, value: 500 },
+						IndividualExposure { who: 105, value: 500 }
+					]
+				}],
+			);
+		})
+	}
+}
diff --git a/substrate/frame/staking/src/weights.rs b/substrate/frame/staking/src/weights.rs
index 02ccdacb01c..92fe0e176a2 100644
--- a/substrate/frame/staking/src/weights.rs
+++ b/substrate/frame/staking/src/weights.rs
@@ -49,6 +49,9 @@ use core::marker::PhantomData;
 
 /// Weight functions needed for `pallet_staking`.
 pub trait WeightInfo {
+	fn on_initialize_noop() -> Weight;
+	fn do_elect_paged_inner(v: u32,) -> Weight;
+	fn clear_election_metadata() -> Weight;
 	fn bond() -> Weight;
 	fn bond_extra() -> Weight;
 	fn unbond() -> Weight;
@@ -72,7 +75,6 @@ pub trait WeightInfo {
 	fn payout_stakers_alive_staked(n: u32, ) -> Weight;
 	fn rebond(l: u32, ) -> Weight;
 	fn reap_stash(s: u32, ) -> Weight;
-	fn new_era(v: u32, n: u32, ) -> Weight;
 	fn get_npos_voters(v: u32, n: u32, ) -> Weight;
 	fn get_npos_targets(v: u32, ) -> Weight;
 	fn set_staking_configs_all_set() -> Weight;
@@ -87,6 +89,17 @@ pub trait WeightInfo {
 /// Weights for `pallet_staking` using the Substrate node and recommended hardware.
 pub struct SubstrateWeight<T>(PhantomData<T>);
 impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	// TODO CI-FAIL: run CI bench bot
+	fn on_initialize_noop() -> Weight {
+	    Default::default()
+	}
+	fn do_elect_paged_inner(_v: u32,) -> Weight {
+	    Default::default()
+	}
+	fn clear_election_metadata() -> Weight {
+	    Default::default()
+	}
+
 	/// Storage: `Staking::Bonded` (r:1 w:1)
 	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Ledger` (r:1 w:1)
@@ -614,60 +627,6 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	/// Storage: `VoterList::ListBags` (r:200 w:0)
 	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`)
-	/// Storage: `VoterList::ListNodes` (r:110 w:0)
-	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::Bonded` (r:110 w:0)
-	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::Ledger` (r:110 w:0)
-	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::Nominators` (r:110 w:0)
-	/// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::Validators` (r:11 w:0)
-	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::CounterForValidators` (r:1 w:0)
-	/// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::ValidatorCount` (r:1 w:0)
-	/// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::MinimumValidatorCount` (r:1 w:0)
-	/// Proof: `Staking::MinimumValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::CurrentEra` (r:1 w:1)
-	/// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::ErasValidatorPrefs` (r:0 w:10)
-	/// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::ErasStakersPaged` (r:0 w:10)
-	/// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	/// Storage: `Staking::ErasStakersOverview` (r:0 w:10)
-	/// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::ErasTotalStake` (r:0 w:1)
-	/// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::ErasStartSessionIndex` (r:0 w:1)
-	/// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::MinimumActiveStake` (r:0 w:1)
-	/// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
-	/// The range of component `v` is `[1, 10]`.
-	/// The range of component `n` is `[0, 100]`.
-	fn new_era(v: u32, n: u32, ) -> Weight {
-		// Proof Size summary in bytes:
-		//  Measured:  `0 + n * (720 ±0) + v * (3598 ±0)`
-		//  Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)`
-		// Minimum execution time: 692_301_000 picoseconds.
-		Weight::from_parts(708_732_000, 512390)
-			// Standard Error: 2_117_299
-			.saturating_add(Weight::from_parts(70_087_600, 0).saturating_mul(v.into()))
-			// Standard Error: 210_977
-			.saturating_add(Weight::from_parts(22_953_405, 0).saturating_mul(n.into()))
-			.saturating_add(T::DbWeight::get().reads(206_u64))
-			.saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into())))
-			.saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into())))
-			.saturating_add(T::DbWeight::get().writes(3_u64))
-			.saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(v.into())))
-			.saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into()))
-			.saturating_add(Weight::from_parts(0, 3566).saturating_mul(v.into()))
-	}
-	/// Storage: `VoterList::CounterForListNodes` (r:1 w:0)
-	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
-	/// Storage: `VoterList::ListBags` (r:200 w:0)
-	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`)
 	/// Storage: `VoterList::ListNodes` (r:2000 w:0)
 	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Bonded` (r:2000 w:0)
@@ -860,6 +819,17 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 
 // For backwards compatibility and tests.
 impl WeightInfo for () {
+	// TODO: run CI bench bot
+	fn on_initialize_noop() -> Weight {
+	    RocksDbWeight::get().reads(1)
+	}
+	fn do_elect_paged_inner(_v: u32,) -> Weight {
+	    RocksDbWeight::get().reads(1)
+	}
+	fn clear_election_metadata() -> Weight {
+	    RocksDbWeight::get().reads(1)
+	}
+
 	/// Storage: `Staking::Bonded` (r:1 w:1)
 	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Ledger` (r:1 w:1)
@@ -1387,60 +1357,6 @@ impl WeightInfo for () {
 	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	/// Storage: `VoterList::ListBags` (r:200 w:0)
 	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`)
-	/// Storage: `VoterList::ListNodes` (r:110 w:0)
-	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::Bonded` (r:110 w:0)
-	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::Ledger` (r:110 w:0)
-	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::Nominators` (r:110 w:0)
-	/// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::Validators` (r:11 w:0)
-	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::CounterForValidators` (r:1 w:0)
-	/// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::ValidatorCount` (r:1 w:0)
-	/// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::MinimumValidatorCount` (r:1 w:0)
-	/// Proof: `Staking::MinimumValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::CurrentEra` (r:1 w:1)
-	/// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::ErasValidatorPrefs` (r:0 w:10)
-	/// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::ErasStakersPaged` (r:0 w:10)
-	/// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	/// Storage: `Staking::ErasStakersOverview` (r:0 w:10)
-	/// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::ErasTotalStake` (r:0 w:1)
-	/// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::ErasStartSessionIndex` (r:0 w:1)
-	/// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`)
-	/// Storage: `Staking::MinimumActiveStake` (r:0 w:1)
-	/// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
-	/// The range of component `v` is `[1, 10]`.
-	/// The range of component `n` is `[0, 100]`.
-	fn new_era(v: u32, n: u32, ) -> Weight {
-		// Proof Size summary in bytes:
-		//  Measured:  `0 + n * (720 ±0) + v * (3598 ±0)`
-		//  Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)`
-		// Minimum execution time: 692_301_000 picoseconds.
-		Weight::from_parts(708_732_000, 512390)
-			// Standard Error: 2_117_299
-			.saturating_add(Weight::from_parts(70_087_600, 0).saturating_mul(v.into()))
-			// Standard Error: 210_977
-			.saturating_add(Weight::from_parts(22_953_405, 0).saturating_mul(n.into()))
-			.saturating_add(RocksDbWeight::get().reads(206_u64))
-			.saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into())))
-			.saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into())))
-			.saturating_add(RocksDbWeight::get().writes(3_u64))
-			.saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(v.into())))
-			.saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into()))
-			.saturating_add(Weight::from_parts(0, 3566).saturating_mul(v.into()))
-	}
-	/// Storage: `VoterList::CounterForListNodes` (r:1 w:0)
-	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
-	/// Storage: `VoterList::ListBags` (r:200 w:0)
-	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`)
 	/// Storage: `VoterList::ListNodes` (r:2000 w:0)
 	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Bonded` (r:2000 w:0)
@@ -1629,4 +1545,4 @@ impl WeightInfo for () {
 			.saturating_add(RocksDbWeight::get().reads(6_u64))
 			.saturating_add(RocksDbWeight::get().writes(2_u64))
 	}
-}
\ No newline at end of file
+}
diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/outer_enums.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/outer_enums.rs
index 80d3a5af266..1f19687c36f 100644
--- a/substrate/frame/support/procedural/src/construct_runtime/expand/outer_enums.rs
+++ b/substrate/frame/support/procedural/src/construct_runtime/expand/outer_enums.rs
@@ -160,7 +160,7 @@ pub fn expand_outer_enum(
 			#scrate::__private::codec::Encode,
 			#scrate::__private::codec::Decode,
 			#scrate::__private::scale_info::TypeInfo,
-			#scrate::__private::RuntimeDebug,
+			#scrate::__private::Debug,
 		)]
 		#[allow(non_camel_case_types)]
 		pub enum #enum_name_ident {
diff --git a/substrate/frame/support/procedural/src/pallet/expand/event.rs b/substrate/frame/support/procedural/src/pallet/expand/event.rs
index 8519143179d..45ca4b7df94 100644
--- a/substrate/frame/support/procedural/src/pallet/expand/event.rs
+++ b/substrate/frame/support/procedural/src/pallet/expand/event.rs
@@ -120,7 +120,7 @@ pub fn expand_event(def: &mut Def) -> proc_macro2::TokenStream {
 			#frame_support::CloneNoBound,
 			#frame_support::EqNoBound,
 			#frame_support::PartialEqNoBound,
-			#frame_support::RuntimeDebugNoBound,
+			#frame_support::DebugNoBound,
 			#frame_support::__private::codec::Encode,
 			#frame_support::__private::codec::Decode,
 			#frame_support::__private::scale_info::TypeInfo,
diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs
index cd3312ebfd8..6dbd0605a52 100644
--- a/substrate/frame/support/src/lib.rs
+++ b/substrate/frame/support/src/lib.rs
@@ -43,6 +43,7 @@ extern crate alloc;
 pub mod __private {
 	pub use alloc::{
 		boxed::Box,
+		fmt::Debug,
 		rc::Rc,
 		string::String,
 		vec,
diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr
index 2385a8f7ee4..3b0bf05bd4e 100644
--- a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr
+++ b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr
@@ -237,12 +237,14 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied
 ...  |
 27 | |     }
 28 | | }
-   | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `frame_system::Event<Runtime>: std::fmt::Debug`
+   | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `&frame_system::Event<Runtime>: std::fmt::Debug`
    |
    = help: the trait `std::fmt::Debug` is implemented for `frame_system::Event<T>`
    = note: required for `frame_system::Event<Runtime>` to implement `std::fmt::Debug`
-   = note: required for the cast from `&frame_system::Event<Runtime>` to `&dyn std::fmt::Debug`
-   = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::RuntimeDebug` (in Nightly builds, run with -Z macro-backtrace for more info)
+   = note: 1 redundant requirement hidden
+   = note: required for `&frame_system::Event<Runtime>` to implement `std::fmt::Debug`
+   = note: required for the cast from `&&frame_system::Event<Runtime>` to `&dyn std::fmt::Debug`
+   = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::Debug` (in Nightly builds, run with -Z macro-backtrace for more info)
 
 error[E0277]: the trait bound `Runtime: Config` is not satisfied
   --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1
@@ -254,12 +256,14 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied
 ...  |
 27 | |     }
 28 | | }
-   | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `frame_system::Error<Runtime>: std::fmt::Debug`
+   | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `&frame_system::Error<Runtime>: std::fmt::Debug`
    |
    = help: the trait `std::fmt::Debug` is implemented for `frame_system::Error<T>`
    = note: required for `frame_system::Error<Runtime>` to implement `std::fmt::Debug`
-   = note: required for the cast from `&frame_system::Error<Runtime>` to `&dyn std::fmt::Debug`
-   = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::RuntimeDebug` (in Nightly builds, run with -Z macro-backtrace for more info)
+   = note: 1 redundant requirement hidden
+   = note: required for `&frame_system::Error<Runtime>` to implement `std::fmt::Debug`
+   = note: required for the cast from `&&frame_system::Error<Runtime>` to `&dyn std::fmt::Debug`
+   = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::Debug` (in Nightly builds, run with -Z macro-backtrace for more info)
 
 error[E0277]: the trait bound `Runtime: Config` is not satisfied
   --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1
diff --git a/substrate/primitives/npos-elections/src/helpers.rs b/substrate/primitives/npos-elections/src/helpers.rs
index 7df6ec9d9db..04f8a5648af 100644
--- a/substrate/primitives/npos-elections/src/helpers.rs
+++ b/substrate/primitives/npos-elections/src/helpers.rs
@@ -17,8 +17,11 @@
 
 //! Helper methods for npos-elections.
 
-use crate::{Assignment, Error, IdentifierT, PerThing128, StakedAssignment, VoteWeight};
-use alloc::vec::Vec;
+use crate::{
+	Assignment, Error, ExtendedBalance, IdentifierT, PerThing128, StakedAssignment, Supports,
+	VoteWeight,
+};
+use alloc::{collections::BTreeMap, vec::Vec};
 use sp_arithmetic::PerThing;
 
 /// Converts a vector of ratio assignments into ones with absolute budget value.
@@ -50,7 +53,7 @@ where
 {
 	let mut staked = assignment_ratio_to_staked(ratio, &stake_of);
 	staked.iter_mut().try_for_each(|a| {
-		a.try_normalize(stake_of(&a.who).into()).map_err(Error::ArithmeticError)
+		a.try_normalize(stake_of(&a.who).into()).map_err(|_| Error::ArithmeticError)
 	})?;
 	Ok(staked)
 }
@@ -70,11 +73,28 @@ pub fn assignment_staked_to_ratio_normalized<A: IdentifierT, P: PerThing128>(
 ) -> Result<Vec<Assignment<A, P>>, Error> {
 	let mut ratio = staked.into_iter().map(|a| a.into_assignment()).collect::<Vec<_>>();
 	for assignment in ratio.iter_mut() {
-		assignment.try_normalize().map_err(Error::ArithmeticError)?;
+		assignment.try_normalize().map_err(|_| Error::ArithmeticError)?;
 	}
 	Ok(ratio)
 }
 
+/// Convert some [`Supports`]s into vector of [`StakedAssignment`]
+pub fn supports_to_staked_assignment<A: IdentifierT>(
+	supports: Supports<A>,
+) -> Vec<StakedAssignment<A>> {
+	let mut staked: BTreeMap<A, Vec<(A, ExtendedBalance)>> = BTreeMap::new();
+	for (target, support) in supports {
+		for (voter, amount) in support.voters {
+			staked.entry(voter).or_default().push((target.clone(), amount))
+		}
+	}
+
+	staked
+		.into_iter()
+		.map(|(who, distribution)| StakedAssignment { who, distribution })
+		.collect::<Vec<_>>()
+}
+
 #[cfg(test)]
 mod tests {
 	use super::*;
diff --git a/substrate/primitives/npos-elections/src/lib.rs b/substrate/primitives/npos-elections/src/lib.rs
index 82ac40fe273..12d4c5948ed 100644
--- a/substrate/primitives/npos-elections/src/lib.rs
+++ b/substrate/primitives/npos-elections/src/lib.rs
@@ -83,7 +83,7 @@ use scale_info::TypeInfo;
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
 use sp_arithmetic::{traits::Zero, Normalizable, PerThing, Rational128, ThresholdOrd};
-use sp_core::{bounded::BoundedVec, RuntimeDebug};
+use sp_core::RuntimeDebug;
 
 #[cfg(test)]
 mod mock;
@@ -110,7 +110,9 @@ pub use reduce::reduce;
 pub use traits::{IdentifierT, PerThing128};
 
 /// The errors that might occur in this crate and `frame-election-provider-solution-type`.
-#[derive(Eq, PartialEq, RuntimeDebug)]
+#[derive(
+	Eq, PartialEq, RuntimeDebug, Clone, codec::Encode, codec::Decode, scale_info::TypeInfo,
+)]
 pub enum Error {
 	/// While going from solution indices to ratio, the weight of all the edges has gone above the
 	/// total.
@@ -122,11 +124,13 @@ pub enum Error {
 	/// One of the page indices was invalid.
 	SolutionInvalidPageIndex,
 	/// An error occurred in some arithmetic operation.
-	ArithmeticError(&'static str),
+	ArithmeticError,
 	/// The data provided to create support map was invalid.
 	InvalidSupportEdge,
 	/// The number of voters is bigger than the `MaxVoters` bound.
 	TooManyVoters,
+	/// Some bounds were exceeded when converting election types.
+	BoundsExceeded,
 }
 
 /// A type which is used in the API of this crate as a numeric weight of a vote, most often the
@@ -444,6 +448,18 @@ impl<AccountId> Default for Support<AccountId> {
 	}
 }
 
+impl<AccountId: Clone> Support<AccountId> {
+	pub fn self_vote_only(who: AccountId, amount: ExtendedBalance) -> (AccountId, Self) {
+		(who.clone(), Self { total: amount, voters: vec![(who, amount)] })
+	}
+}
+
+impl<AccountId> Backings for &Support<AccountId> {
+	fn total(&self) -> ExtendedBalance {
+		self.total
+	}
+}
+
 /// A target-major representation of the the election outcome.
 ///
 /// Essentially a flat variant of [`SupportMap`].
@@ -451,11 +467,6 @@ impl<AccountId> Default for Support<AccountId> {
 /// The main advantage of this is that it is encodable.
 pub type Supports<A> = Vec<(A, Support<A>)>;
 
-/// Same as `Supports` but bounded by `B`.
-///
-/// To note, the inner `Support` is still unbounded.
-pub type BoundedSupports<A, B> = BoundedVec<(A, Support<A>), B>;
-
 /// Linkage from a winner to their [`Support`].
 ///
 /// This is more helpful than a normal [`Supports`] as it allows faster error checking.
@@ -479,8 +490,7 @@ pub fn to_support_map<AccountId: IdentifierT>(
 	supports
 }
 
-/// Same as [`to_support_map`] except it returns a
-/// flat vector.
+/// Same as [`to_support_map`] except it returns a flat vector.
 pub fn to_supports<AccountId: IdentifierT>(
 	assignments: &[StakedAssignment<AccountId>],
 ) -> Supports<AccountId> {
@@ -499,23 +509,34 @@ pub trait EvaluateSupport {
 
 impl<AccountId: IdentifierT> EvaluateSupport for Supports<AccountId> {
 	fn evaluate(&self) -> ElectionScore {
-		let mut minimal_stake = ExtendedBalance::max_value();
-		let mut sum_stake: ExtendedBalance = Zero::zero();
-		// NOTE: The third element might saturate but fine for now since this will run on-chain and
-		// need to be fast.
-		let mut sum_stake_squared: ExtendedBalance = Zero::zero();
-
-		for (_, support) in self {
-			sum_stake = sum_stake.saturating_add(support.total);
-			let squared = support.total.saturating_mul(support.total);
-			sum_stake_squared = sum_stake_squared.saturating_add(squared);
-			if support.total < minimal_stake {
-				minimal_stake = support.total;
-			}
-		}
+		evaluate_support(self.iter().map(|(_, s)| s))
+	}
+}
 
-		ElectionScore { minimal_stake, sum_stake, sum_stake_squared }
+/// Generic representation of a support.
+pub trait Backings {
+	/// The total backing of an individual target.
+	fn total(&self) -> ExtendedBalance;
+}
+
+/// General evaluation of a list of backings that returns an election score.
+pub fn evaluate_support(backings: impl Iterator<Item = impl Backings>) -> ElectionScore {
+	let mut minimal_stake = ExtendedBalance::max_value();
+	let mut sum_stake: ExtendedBalance = Zero::zero();
+	// NOTE: The third element might saturate but fine for now since this will run on-chain and
+	// need to be fast.
+	let mut sum_stake_squared: ExtendedBalance = Zero::zero();
+
+	for support in backings {
+		sum_stake = sum_stake.saturating_add(support.total());
+		let squared = support.total().saturating_mul(support.total());
+		sum_stake_squared = sum_stake_squared.saturating_add(squared);
+		if support.total() < minimal_stake {
+			minimal_stake = support.total();
+		}
 	}
+
+	ElectionScore { minimal_stake, sum_stake, sum_stake_squared }
 }
 
 /// Converts raw inputs to types used in this crate.
diff --git a/substrate/primitives/npos-elections/src/phragmen.rs b/substrate/primitives/npos-elections/src/phragmen.rs
index f331152e722..404c2ff8e6b 100644
--- a/substrate/primitives/npos-elections/src/phragmen.rs
+++ b/substrate/primitives/npos-elections/src/phragmen.rs
@@ -97,7 +97,7 @@ pub fn seq_phragmen<AccountId: IdentifierT, P: PerThing128>(
 		voters.into_iter().filter_map(|v| v.into_assignment()).collect::<Vec<_>>();
 	let _ = assignments
 		.iter_mut()
-		.try_for_each(|a| a.try_normalize().map_err(crate::Error::ArithmeticError))?;
+		.try_for_each(|a| a.try_normalize().map_err(|_| crate::Error::ArithmeticError))?;
 	let winners = winners
 		.into_iter()
 		.map(|w_ptr| (w_ptr.borrow().who.clone(), w_ptr.borrow().backed_stake))
@@ -205,7 +205,7 @@ pub fn seq_phragmen_core<AccountId: IdentifierT>(
 		// edge of all candidates that eventually have a non-zero weight must be elected.
 		debug_assert!(voter.edges.iter().all(|e| e.candidate.borrow().elected));
 		// inc budget to sum the budget.
-		voter.try_normalize_elected().map_err(crate::Error::ArithmeticError)?;
+		voter.try_normalize_elected().map_err(|_| crate::Error::ArithmeticError)?;
 	}
 
 	Ok((candidates, voters))
diff --git a/substrate/primitives/npos-elections/src/phragmms.rs b/substrate/primitives/npos-elections/src/phragmms.rs
index 9a17f0dfa7c..6a44bf86512 100644
--- a/substrate/primitives/npos-elections/src/phragmms.rs
+++ b/substrate/primitives/npos-elections/src/phragmms.rs
@@ -71,7 +71,7 @@ pub fn phragmms<AccountId: IdentifierT, P: PerThing128>(
 	let _ = assignments
 		.iter_mut()
 		.try_for_each(|a| a.try_normalize())
-		.map_err(crate::Error::ArithmeticError)?;
+		.map_err(|_| crate::Error::ArithmeticError)?;
 	let winners = winners
 		.into_iter()
 		.map(|w_ptr| (w_ptr.borrow().who.clone(), w_ptr.borrow().backed_stake))
diff --git a/substrate/primitives/staking/src/lib.rs b/substrate/primitives/staking/src/lib.rs
index 8e23c6800a9..538cef00e26 100644
--- a/substrate/primitives/staking/src/lib.rs
+++ b/substrate/primitives/staking/src/lib.rs
@@ -25,7 +25,7 @@ extern crate alloc;
 use crate::currency_to_vote::CurrencyToVote;
 use alloc::{collections::btree_map::BTreeMap, vec, vec::Vec};
 use codec::{Decode, Encode, FullCodec, HasCompact, MaxEncodedLen};
-use core::ops::Sub;
+use core::ops::{Add, AddAssign, Sub, SubAssign};
 use scale_info::TypeInfo;
 use sp_runtime::{
 	traits::{AtLeast32BitUnsigned, Zero},
@@ -346,7 +346,7 @@ pub trait StakingUnchecked: StakingInterface {
 }
 
 /// The amount of exposure for an era that an individual nominator has (susceptible to slashing).
-#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Encode, Decode, RuntimeDebug, TypeInfo)]
+#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Encode, Decode, RuntimeDebug, TypeInfo, Copy)]
 pub struct IndividualExposure<AccountId, Balance: HasCompact> {
 	/// The stash account of the nominator in question.
 	pub who: AccountId,
@@ -379,7 +379,31 @@ impl<
 		Balance: HasCompact + AtLeast32BitUnsigned + Copy + codec::MaxEncodedLen,
 	> Exposure<AccountId, Balance>
 {
-	/// Splits an `Exposure` into `PagedExposureMetadata` and multiple chunks of
+	/// Splits self into two instances of exposures.
+	///
+	/// `n_others` individual exposures are consumed from self and returned as part of the new
+	/// exposure.
+	///
+	/// Since this method splits `others` of a single exposure, `total.own` will be the same for
+	/// both `self` and the returned exposure.
+	pub fn split_others(&mut self, n_others: u32) -> Self {
+		let head_others: Vec<_> =
+			self.others.drain(..(n_others as usize).min(self.others.len())).collect();
+
+		let total_others_head: Balance = head_others
+			.iter()
+			.fold(Zero::zero(), |acc: Balance, o| acc.saturating_add(o.value));
+
+		self.total = self.total.saturating_sub(total_others_head);
+
+		Self {
+			total: total_others_head.saturating_add(self.own),
+			own: self.own,
+			others: head_others,
+		}
+	}
+
+	/// Converts an `Exposure` into `PagedExposureMetadata` and multiple chunks of
 	/// `IndividualExposure` with each chunk having maximum of `page_size` elements.
 	pub fn into_pages(
 		self,
@@ -400,7 +424,6 @@ impl<
 					value: individual.value,
 				})
 			}
-
 			exposure_pages.push(ExposurePage { page_total, others });
 		}
 
@@ -432,6 +455,19 @@ impl<A, B: Default + HasCompact> Default for ExposurePage<A, B> {
 	}
 }
 
+/// Creates an `ExposurePage` from a set of individual exposures.
+impl<A, B: HasCompact + Default + AddAssign + SubAssign + Clone> From<Vec<IndividualExposure<A, B>>>
+	for ExposurePage<A, B>
+{
+	fn from(exposures: Vec<IndividualExposure<A, B>>) -> Self {
+		exposures.into_iter().fold(ExposurePage::default(), |mut page, e| {
+			page.page_total += e.value.clone();
+			page.others.push(e);
+			page
+		})
+	}
+}
+
 /// Metadata for Paged Exposure of a validator such as total stake across pages and page count.
 ///
 /// In combination with the associated `ExposurePage`s, it can be used to reconstruct a full
@@ -449,6 +485,7 @@ impl<A, B: Default + HasCompact> Default for ExposurePage<A, B> {
 	TypeInfo,
 	Default,
 	MaxEncodedLen,
+	Copy,
 )]
 pub struct PagedExposureMetadata<Balance: HasCompact + codec::MaxEncodedLen> {
 	/// The total balance backing this validator.
@@ -463,6 +500,42 @@ pub struct PagedExposureMetadata<Balance: HasCompact + codec::MaxEncodedLen> {
 	pub page_count: Page,
 }
 
+impl<Balance> PagedExposureMetadata<Balance>
+where
+	Balance: HasCompact
+		+ codec::MaxEncodedLen
+		+ Add<Output = Balance>
+		+ Sub<Output = Balance>
+		+ sp_runtime::Saturating
+		+ PartialEq
+		+ Copy
+		+ sp_runtime::traits::Debug,
+{
+	/// Consumes self and returns the result of the metadata updated with `others_balance` and
+	/// of adding `others_num` nominators to the metadata.
+	///
+	/// `Max` is a getter of the maximum number of nominators per page.
+	pub fn update_with<Max: sp_core::Get<u32>>(
+		self,
+		others_balance: Balance,
+		others_num: u32,
+	) -> Self {
+		let page_limit = Max::get().max(1);
+		let new_nominator_count = self.nominator_count.saturating_add(others_num);
+		let new_page_count = new_nominator_count
+			.saturating_add(page_limit)
+			.saturating_sub(1)
+			.saturating_div(page_limit);
+
+		Self {
+			total: self.total.saturating_add(others_balance),
+			own: self.own,
+			nominator_count: new_nominator_count,
+			page_count: new_page_count,
+		}
+	}
+}
+
 /// A type that belongs only in the context of an `Agent`.
 ///
 /// `Agent` is someone that manages delegated funds from [`Delegator`] accounts. It can
@@ -623,3 +696,114 @@ pub trait DelegationMigrator {
 }
 
 sp_core::generate_feature_enabled_macro!(runtime_benchmarks_enabled, feature = "runtime-benchmarks", $);
+
+#[cfg(test)]
+mod tests {
+	use sp_core::ConstU32;
+
+	use super::*;
+
+	#[test]
+	fn update_with_works() {
+		let metadata = PagedExposureMetadata::<u32> {
+			total: 1000,
+			own: 0, // don't care
+			nominator_count: 10,
+			page_count: 1,
+		};
+
+		assert_eq!(
+			metadata.update_with::<ConstU32<10>>(1, 1),
+			PagedExposureMetadata { total: 1001, own: 0, nominator_count: 11, page_count: 2 },
+		);
+
+		assert_eq!(
+			metadata.update_with::<ConstU32<5>>(1, 1),
+			PagedExposureMetadata { total: 1001, own: 0, nominator_count: 11, page_count: 3 },
+		);
+
+		assert_eq!(
+			metadata.update_with::<ConstU32<4>>(1, 1),
+			PagedExposureMetadata { total: 1001, own: 0, nominator_count: 11, page_count: 3 },
+		);
+
+		assert_eq!(
+			metadata.update_with::<ConstU32<1>>(1, 1),
+			PagedExposureMetadata { total: 1001, own: 0, nominator_count: 11, page_count: 11 },
+		);
+	}
+
+	#[test]
+	fn individual_exposures_to_exposure_works() {
+		let exposure_1 = IndividualExposure { who: 1, value: 10u32 };
+		let exposure_2 = IndividualExposure { who: 2, value: 20 };
+		let exposure_3 = IndividualExposure { who: 3, value: 30 };
+
+		let exposure_page: ExposurePage<u32, u32> = vec![exposure_1, exposure_2, exposure_3].into();
+
+		assert_eq!(
+			exposure_page,
+			ExposurePage { page_total: 60, others: vec![exposure_1, exposure_2, exposure_3] },
+		);
+	}
+
+	#[test]
+	fn empty_individual_exposures_to_exposure_works() {
+		let empty_exposures: Vec<IndividualExposure<u32, u32>> = vec![];
+
+		let exposure_page: ExposurePage<u32, u32> = empty_exposures.into();
+		assert_eq!(exposure_page, ExposurePage { page_total: 0, others: vec![] });
+	}
+
+	#[test]
+	fn exposure_split_others_works() {
+		let exposure = Exposure {
+			total: 100,
+			own: 20,
+			others: vec![
+				IndividualExposure { who: 1, value: 20u32 },
+				IndividualExposure { who: 2, value: 20 },
+				IndividualExposure { who: 3, value: 20 },
+				IndividualExposure { who: 4, value: 20 },
+			],
+		};
+
+		let mut exposure_0 = exposure.clone();
+	// split others with 0 `n_others` is a noop and returns an empty exposure (with `own`
+	// only).
+		let split_exposure = exposure_0.split_others(0);
+		assert_eq!(exposure_0, exposure);
+		assert_eq!(split_exposure, Exposure { total: 20, own: 20, others: vec![] });
+
+		let mut exposure_1 = exposure.clone();
+		// split individual exposures so that the returned exposure has 1 individual exposure.
+		let split_exposure = exposure_1.split_others(1);
+		assert_eq!(exposure_1.own, 20);
+		assert_eq!(exposure_1.total, 20 + 3 * 20);
+		assert_eq!(exposure_1.others.len(), 3);
+
+		assert_eq!(split_exposure.own, 20);
+		assert_eq!(split_exposure.total, 20 + 1 * 20);
+		assert_eq!(split_exposure.others.len(), 1);
+
+		let mut exposure_3 = exposure.clone();
+		// split individual exposures so that the returned exposure has 3 individual exposures,
+		// which are consumed from the original exposure.
+		let split_exposure = exposure_3.split_others(3);
+		assert_eq!(exposure_3.own, 20);
+		assert_eq!(exposure_3.total, 20 + 1 * 20);
+		assert_eq!(exposure_3.others.len(), 1);
+
+		assert_eq!(split_exposure.own, 20);
+		assert_eq!(split_exposure.total, 20 + 3 * 20);
+		assert_eq!(split_exposure.others.len(), 3);
+
+		let mut exposure_max = exposure.clone();
+	// split others with more `n_others` than the number of others in the exposure
+		// consumes all the individual exposures of the original Exposure and returns them in the
+		// new exposure.
+		let split_exposure = exposure_max.split_others(u32::MAX);
+		assert_eq!(split_exposure, exposure);
+		assert_eq!(exposure_max, Exposure { total: 20, own: 20, others: vec![] });
+	}
+}
diff --git a/substrate/primitives/staking/src/offence.rs b/substrate/primitives/staking/src/offence.rs
index e73e8efe583..9e3c0e5a194 100644
--- a/substrate/primitives/staking/src/offence.rs
+++ b/substrate/primitives/staking/src/offence.rs
@@ -19,7 +19,7 @@
 //! that use staking.
 
 use alloc::vec::Vec;
-use codec::{Decode, Encode};
+use codec::{Decode, Encode, MaxEncodedLen};
 use sp_core::Get;
 use sp_runtime::{transaction_validity::TransactionValidityError, DispatchError, Perbill};
 
@@ -252,7 +252,15 @@ impl<Reporter, Evidence> OffenceReportSystem<Reporter, Evidence> for () {
 /// For instance used for the purposes of distinguishing who should be
 /// prioritized for disablement.
 #[derive(
-	Clone, Copy, PartialEq, Eq, Encode, Decode, sp_runtime::RuntimeDebug, scale_info::TypeInfo,
+	Clone,
+	Copy,
+	PartialEq,
+	Eq,
+	Encode,
+	Decode,
+	MaxEncodedLen,
+	sp_runtime::RuntimeDebug,
+	scale_info::TypeInfo,
 )]
 pub struct OffenceSeverity(pub Perbill);
 
diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs
index 0c068fc585b..c7c5f5c478a 100644
--- a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs
@@ -598,6 +598,7 @@ impl PalletCmd {
 				let benchmark_name = &benchmark.name;
 				if extrinsic.is_empty() ||
 					extrinsic.as_bytes() == &b"*"[..] ||
+					extrinsic.as_bytes() == &b"all"[..] ||
 					extrinsics.contains(&&benchmark_name[..])
 				{
 					benchmarks_to_run.push((
@@ -645,7 +646,10 @@ impl PalletCmd {
 	fn pallet_selected(&self, pallet: &Vec<u8>) -> bool {
 		let include = self.pallet.clone().unwrap_or_default();
 
-		let included = include.is_empty() || include == "*" || include.as_bytes() == pallet;
+		let included = include.is_empty() ||
+			include == "*" ||
+			include == "all" ||
+			include.as_bytes() == pallet;
 		let excluded = self.exclude_pallets.iter().any(|p| p.as_bytes() == pallet);
 
 		included && !excluded
diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs
index 54a055d4a33..caa999c3a6c 100644
--- a/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs
@@ -50,7 +50,7 @@ pub struct PalletCmd {
 	#[arg(short, long, value_parser = parse_pallet_name, required_unless_present_any = ["list", "json_input", "all"], default_value_if("all", "true", Some("*".into())))]
 	pub pallet: Option<String>,
 
-	/// Select an extrinsic inside the pallet to benchmark, or `*` for all.
+	/// Select an extrinsic inside the pallet to benchmark, or `*` or `all` for all.
 	#[arg(short, long, required_unless_present_any = ["list", "json_input", "all"], default_value_if("all", "true", Some("*".into())))]
 	pub extrinsic: Option<String>,
 
diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml
index fc0b2d5a140..80b72febfb5 100644
--- a/umbrella/Cargo.toml
+++ b/umbrella/Cargo.toml
@@ -87,6 +87,7 @@ std = [
 	"pallet-delegated-staking?/std",
 	"pallet-democracy?/std",
 	"pallet-dev-mode?/std",
+	"pallet-election-provider-multi-block?/std",
 	"pallet-election-provider-multi-phase?/std",
 	"pallet-election-provider-support-benchmarking?/std",
 	"pallet-elections-phragmen?/std",
@@ -281,6 +282,7 @@ runtime-benchmarks = [
 	"pallet-core-fellowship?/runtime-benchmarks",
 	"pallet-delegated-staking?/runtime-benchmarks",
 	"pallet-democracy?/runtime-benchmarks",
+	"pallet-election-provider-multi-block?/runtime-benchmarks",
 	"pallet-election-provider-multi-phase?/runtime-benchmarks",
 	"pallet-election-provider-support-benchmarking?/runtime-benchmarks",
 	"pallet-elections-phragmen?/runtime-benchmarks",
@@ -417,6 +419,7 @@ try-runtime = [
 	"pallet-delegated-staking?/try-runtime",
 	"pallet-democracy?/try-runtime",
 	"pallet-dev-mode?/try-runtime",
+	"pallet-election-provider-multi-block?/try-runtime",
 	"pallet-election-provider-multi-phase?/try-runtime",
 	"pallet-elections-phragmen?/try-runtime",
 	"pallet-fast-unstake?/try-runtime",
@@ -546,7 +549,7 @@ with-tracing = [
 	"sp-tracing?/with-tracing",
 	"sp-tracing?/with-tracing",
 ]
-runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-weight-reclaim", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-rewards", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", 
"pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", 
"sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"]
+runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-weight-reclaim", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-rewards", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", 
"pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-block", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", 
"polkadot-sdk-frame", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"]
 runtime = [
 	"frame-benchmarking",
 	"frame-benchmarking-pallet-pov",
@@ -1028,6 +1031,11 @@ default-features = false
 optional = true
 path = "../substrate/frame/examples/dev-mode"
 
+[dependencies.pallet-election-provider-multi-block]
+default-features = false
+optional = true
+path = "../substrate/frame/election-provider-multi-block"
+
 [dependencies.pallet-election-provider-multi-phase]
 default-features = false
 optional = true
diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs
index a132f16a2c3..79a4ed9960e 100644
--- a/umbrella/src/lib.rs
+++ b/umbrella/src/lib.rs
@@ -443,6 +443,10 @@ pub use pallet_democracy;
 #[cfg(feature = "pallet-dev-mode")]
 pub use pallet_dev_mode;
 
+/// PALLET multi phase+block election providers.
+#[cfg(feature = "pallet-election-provider-multi-block")]
+pub use pallet_election_provider_multi_block;
+
 /// PALLET two phase election providers.
 #[cfg(feature = "pallet-election-provider-multi-phase")]
 pub use pallet_election_provider_multi_phase;
-- 
GitLab