From 6cfdbe5caaf8c38ce125725e16ae282455128552 Mon Sep 17 00:00:00 2001
From: Kian Paimani <5588131+kianenigma@users.noreply.github.com>
Date: Fri, 4 Mar 2022 15:54:23 +0000
Subject: [PATCH] Trie version migration pallet  (#10073)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* starting

* Updated from other branch.

* setting flag

* flag in storage struct

* fix flagging to access and insert.

* added todo to fix

* also missing serialize meta to storage proof

* extract meta.

* Isolate old trie layout.

* failing test that requires storing in meta when old hash scheme is used.

* old hash compatibility

* Db migrate.

* running tests with both states when interesting.

* fix chain spec test with serde default.

* export state (missing trie function).

* Pending using new branch, lacking genericity on layout resolution.

* extract and set global meta

* Update to branch 4

* fix iterator with root flag (no longer insert node).

* fix trie root hashing of root

* complete basic backend.

* Remove old_hash meta from proof that do not use inner_hashing.

* fix trie test for empty (force layout on empty deltas).

* Root update fix.

* debug on meta

* Use trie key iteration that do not include value in proofs.

* switch default test ext to use inner hash.

* small integration test, and fix tx cache mgmt in ext.
test  failing

* Proof scenario at state-machine level.

* trace for db upgrade

* try different param

* act more like iter_from.

* Bigger batches.

* Update trie dependency.

* drafting codec changes and refact

* before removing unused branch no value alt hashing.
more work to do: rename all flag vars to alt_hash, and remove the extrinsic,
replacing it with a storage query at every storage_root call.

* alt hashing only for branch with value.

* fix trie tests

* Hash of value include the encoded size.

* removing fields(broken)

* fix trie_stream to also include value length in inner hash.

* triedbmut only using alt type if inner hashing.

* trie_stream to also only use alt hashing type when actually alt hashing.

* Refactor meta state, logic should work with change of trie threshold.

* Remove NoMeta variant.

* Remove state_hashed trigger specific functions.

* pending switching to using threshold, new storage root api does not
make much sense.

* refactoring to use state from backend (not possible payload changes).

* Applying from previous state

* Remove default from storage, genesis need a special build.

* rem empty space

* Catch problem: when using triedb with default: we should not revert
nodes: otherwise things such as the trie codec cannot decode-encode without
changing state.

* fix compilation

* Right logic to avoid switch on reencode when default layout.

* Clean up some todos

* remove trie meta from root upstream

* update upstream and fix benches.

* split some long lines.

* Update trie crate to work with new design.

* Finish update to refactored upstream.

* update to latest triedb changes.

* Clean up.

* fix executor test.

* rust fmt from master.

* rust format.

* rustfmt

* fix

* start host function driven versioning

* update state-machine part

* still need access to state version from runtime

* state hash in mem: wrong

* direction likely correct, but passing call to code exec for genesis
init seem awkward.

* state version serialize in runtime, wrong approach, just initialize it
with no threshold for core api < 4 seems more proper.

* stateversion from runtime version (core api >= 4).

* update trie, fix tests

* unused import

* clean some TODOs

* Require RuntimeVersionOf for executor

* use RuntimeVersionOf to resolve genesis state version.

* update runtime version test

* fix state-machine tests

* TODO

* Use runtime version from storage wasm with fast sync.

* rustfmt

* fmt

* fix test

* revert useless changes.

* clean some unused changes

* fmt

* removing useless trait function.

* remove remaining reference to state_hash

* fix some imports

* Follow chain state version management.

* trie update, fix and constant threshold for trie layouts.

* update deps

* Update to latest trie pr changes.

* fix benches

* Verify proof requires right layout.

* update trie_root

* Update trie deps to latest

* Update to latest trie versioning

* Removing patch

* update lock

* extrinsic for sc-service-test using layout v0.

* Adding RuntimeVersionOf to CallExecutor works.

* fmt

* error when resolving version and no wasm in storage.

* use existing utils to instantiate runtime code.

* migration pallet

* Patch to delay runtime switch.

* Revert "Patch to delay runtime switch."

This reverts commit 67e55fee468f1a0cda853f5362b22e0d775786da.

* fix test

* fix child migration calls.

* useless closure

* remove remaining state_hash variables.

* Fix and add more tests

* Remove outdated comment

* useless inner hash

* fmt

* remote tests

* finally ksm works

* batches are broken

* clean the benchmarks

* Apply suggestions from code review

Co-authored-by: Guillaume Thiolliere <gui.thiolliere@gmail.com>

* Apply suggestions from code review

Co-authored-by: Guillaume Thiolliere <gui.thiolliere@gmail.com>

* Update frame/state-trie-migration/src/lib.rs

Co-authored-by: Joshy Orndorff <JoshOrndorff@users.noreply.github.com>

* Update frame/state-trie-migration/src/lib.rs

* brand new version

* fix build

* Update frame/state-trie-migration/src/lib.rs

Co-authored-by: Guillaume Thiolliere <gui.thiolliere@gmail.com>

* Update frame/state-trie-migration/src/lib.rs

Co-authored-by: Guillaume Thiolliere <gui.thiolliere@gmail.com>

* Update primitives/storage/src/lib.rs

Co-authored-by: cheme <emericchevalier.pro@gmail.com>

* Update frame/state-trie-migration/src/lib.rs

Co-authored-by: cheme <emericchevalier.pro@gmail.com>

* Update frame/state-trie-migration/src/lib.rs

Co-authored-by: cheme <emericchevalier.pro@gmail.com>

* fmt and opt-in feature to apply state change.

* feature gate core version, use new test feature for node and test node

* Use a 'State' api version instead of Core one.

* fix merge of test function

* use blake macro.

* Fix state api (require declaring the api in runtime).

* Opt out feature, fix macro for io to select a given version
instead of latest.

* run test nodes on new state.

* fix

* new test structure

* new testing stuff from emeric

* Add commit_all, still not working

* Fix all tests

* add comment

* we have PoV tracking baby

* document stuff, but proof size is still wrong

* FUCK YEAH

* a big batch of review comments

* add more tests

* tweak test

* update config

* some remote-ext stuff

* delete some of the old stuff

* sync more files with master to minimize the diff

* Fix all tests

* make signed migration a bit more relaxed

* add witness check to signed submissions

* allow custom migration to also go above limit

* Fix these pesky tests

* ==== removal of the unsigned stuff ====

* Make all tests work again

* separate the tests from the logic so it can be reused easier

* fix overall build

* Update frame/state-trie-migration/src/lib.rs

Co-authored-by: cheme <emericchevalier.pro@gmail.com>

* Update frame/state-trie-migration/src/lib.rs

Co-authored-by: cheme <emericchevalier.pro@gmail.com>

* Slightly better termination

* some final tweaks

* Fix tests

* Restrict access to signed migrations

* address most of the review comments

* fix defensive

* New simplified code

* Fix weights

* fmt

* Update frame/state-trie-migration/src/lib.rs

Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>

* make the tests correctly fail

* Fix build

* Fix build

* try and fix the benchmarks

* fix build

* Fix cargo file

* Fix runtime deposit

* make rustdoc happy

* cargo run --quiet --profile=production  --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_state_trie_migration --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/state-trie-migration/src/weights.rs --template=./.maintain/frame-weight-template.hbs

Co-authored-by: cheme <emericchevalier.pro@gmail.com>
Co-authored-by: Guillaume Thiolliere <gui.thiolliere@gmail.com>
Co-authored-by: Joshy Orndorff <JoshOrndorff@users.noreply.github.com>
Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>
Co-authored-by: Parity Bot <admin@parity.io>
---
 substrate/Cargo.lock                          |   31 +
 substrate/bin/node/runtime/Cargo.toml         |    4 +
 substrate/bin/node/runtime/src/lib.rs         |   26 +-
 .../bags-list/remote-tests/src/migration.rs   |    3 +-
 .../remote-tests/src/sanity_check.rs          |    3 +-
 .../bags-list/remote-tests/src/snapshot.rs    |    2 +-
 .../frame/state-trie-migration/Cargo.toml     |   57 +
 .../frame/state-trie-migration/src/lib.rs     | 1557 +++++++++++++++++
 .../frame/state-trie-migration/src/weights.rs |  137 ++
 substrate/frame/support/src/traits/misc.rs    |    8 +-
 substrate/primitives/core/src/lib.rs          |    2 +-
 .../primitives/state-machine/src/testing.rs   |    7 +-
 .../state-machine/src/trie_backend_essence.rs |   78 +-
 substrate/primitives/storage/src/lib.rs       |   13 +-
 .../frame/remote-externalities/src/lib.rs     |   66 +-
 .../utils/frame/try-runtime/cli/src/lib.rs    |    1 +
 16 files changed, 1954 insertions(+), 41 deletions(-)
 create mode 100644 substrate/frame/state-trie-migration/Cargo.toml
 create mode 100644 substrate/frame/state-trie-migration/src/lib.rs
 create mode 100644 substrate/frame/state-trie-migration/src/weights.rs

diff --git a/substrate/Cargo.lock b/substrate/Cargo.lock
index 9cb1eae0a91..bfe18017402 100644
--- a/substrate/Cargo.lock
+++ b/substrate/Cargo.lock
@@ -5010,6 +5010,7 @@ dependencies = [
  "pallet-society",
  "pallet-staking",
  "pallet-staking-reward-curve",
+ "pallet-state-trie-migration",
  "pallet-sudo",
  "pallet-timestamp",
  "pallet-tips",
@@ -6419,6 +6420,30 @@ dependencies = [
  "sp-arithmetic",
 ]
 
+[[package]]
+name = "pallet-state-trie-migration"
+version = "4.0.0-dev"
+dependencies = [
+ "frame-benchmarking",
+ "frame-support",
+ "frame-system",
+ "log 0.4.14",
+ "pallet-balances",
+ "parity-scale-codec",
+ "parking_lot 0.12.0",
+ "remote-externalities",
+ "scale-info",
+ "serde",
+ "sp-core",
+ "sp-io",
+ "sp-runtime",
+ "sp-std",
+ "sp-tracing",
+ "thousands",
+ "tokio",
+ "zstd",
+]
+
 [[package]]
 name = "pallet-sudo"
 version = "4.0.0-dev"
@@ -10838,6 +10863,12 @@ dependencies = [
  "syn",
 ]
 
+[[package]]
+name = "thousands"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3bf63baf9f5039dadc247375c29eb13706706cfde997d0330d05aa63a77d8820"
+
 [[package]]
 name = "thread_local"
 version = "1.1.3"
diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml
index e7bd50c3d18..0572345fa0c 100644
--- a/substrate/bin/node/runtime/Cargo.toml
+++ b/substrate/bin/node/runtime/Cargo.toml
@@ -87,6 +87,7 @@ pallet-session = { version = "4.0.0-dev", features = [ "historical" ], path = ".
 pallet-session-benchmarking = { version = "4.0.0-dev", path = "../../../frame/session/benchmarking", default-features = false, optional = true }
 pallet-staking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/staking" }
 pallet-staking-reward-curve = { version = "4.0.0-dev", default-features = false, path = "../../../frame/staking/reward-curve" }
+pallet-state-trie-migration = { version = "4.0.0-dev", default-features = false, path = "../../../frame/state-trie-migration" }
 pallet-scheduler = { version = "4.0.0-dev", default-features = false, path = "../../../frame/scheduler" }
 pallet-society = { version = "4.0.0-dev", default-features = false, path = "../../../frame/society" }
 pallet-sudo = { version = "4.0.0-dev", default-features = false, path = "../../../frame/sudo" }
@@ -153,6 +154,7 @@ std = [
 	"sp-runtime/std",
 	"sp-staking/std",
 	"pallet-staking/std",
+	"pallet-state-trie-migration/std",
 	"sp-session/std",
 	"pallet-sudo/std",
 	"frame-support/std",
@@ -214,6 +216,7 @@ runtime-benchmarks = [
 	"pallet-session-benchmarking",
 	"pallet-society/runtime-benchmarks",
 	"pallet-staking/runtime-benchmarks",
+	"pallet-state-trie-migration/runtime-benchmarks",
 	"pallet-timestamp/runtime-benchmarks",
 	"pallet-tips/runtime-benchmarks",
 	"pallet-transaction-storage/runtime-benchmarks",
@@ -261,6 +264,7 @@ try-runtime = [
 	"pallet-session/try-runtime",
 	"pallet-society/try-runtime",
 	"pallet-staking/try-runtime",
+	"pallet-state-trie-migration/try-runtime",
 	"pallet-sudo/try-runtime",
 	"pallet-timestamp/try-runtime",
 	"pallet-tips/try-runtime",
diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs
index 0b0c033ae85..f12bf8a8836 100644
--- a/substrate/bin/node/runtime/src/lib.rs
+++ b/substrate/bin/node/runtime/src/lib.rs
@@ -39,7 +39,7 @@ use frame_support::{
 };
 use frame_system::{
 	limits::{BlockLength, BlockWeights},
-	EnsureRoot,
+	EnsureRoot, EnsureSigned,
 };
 pub use node_primitives::{AccountId, Signature};
 use node_primitives::{AccountIndex, Balance, BlockNumber, Hash, Index, Moment};
@@ -1367,6 +1367,28 @@ impl pallet_whitelist::Config for Runtime {
 	type WeightInfo = pallet_whitelist::weights::SubstrateWeight<Runtime>;
 }
 
+parameter_types! {
+	pub const SignedMigrationMaxLimits: pallet_state_trie_migration::MigrationLimits =
+		pallet_state_trie_migration::MigrationLimits { size: 1024 * 1024 / 2, item: 512 };
+	pub const MigrationSignedDepositPerItem: Balance = 1 * CENTS;
+	pub const MigrationSignedDepositBase: Balance = 20 * DOLLARS;
+}
+
+impl pallet_state_trie_migration::Config for Runtime {
+	type Event = Event;
+	type ControlOrigin = EnsureRoot<AccountId>;
+	type Currency = Balances;
+	type SignedDepositPerItem = MigrationSignedDepositPerItem;
+	type SignedDepositBase = MigrationSignedDepositBase;
+	type SignedMigrationMaxLimits = SignedMigrationMaxLimits;
+	// Warning: this is not advised, as it might allow the chain to be temporarily DOS-ed.
+	// Preferably, if the chain's governance/maintenance team is planning on using a specific
+	// account for the migration, put it here to make sure only that account can trigger the signed
+	// migrations.
+	type SignedFilter = EnsureSigned<Self::AccountId>;
+	type WeightInfo = ();
+}
+
 construct_runtime!(
 	pub enum Runtime where
 		Block = Block,
@@ -1418,6 +1440,7 @@ construct_runtime!(
 		Uniques: pallet_uniques,
 		TransactionStorage: pallet_transaction_storage,
 		BagsList: pallet_bags_list,
+		StateTrieMigration: pallet_state_trie_migration,
 		ChildBounties: pallet_child_bounties,
 		Referenda: pallet_referenda,
 		ConvictionVoting: pallet_conviction_voting,
@@ -1512,6 +1535,7 @@ mod benches {
 		[pallet_scheduler, Scheduler]
 		[pallet_session, SessionBench::<Runtime>]
 		[pallet_staking, Staking]
+		[pallet_state_trie_migration, StateTrieMigration]
 		[frame_system, SystemBench::<Runtime>]
 		[pallet_timestamp, Timestamp]
 		[pallet_tips, Tips]
diff --git a/substrate/frame/bags-list/remote-tests/src/migration.rs b/substrate/frame/bags-list/remote-tests/src/migration.rs
index aadbbdae3d6..4d5169fcc6d 100644
--- a/substrate/frame/bags-list/remote-tests/src/migration.rs
+++ b/substrate/frame/bags-list/remote-tests/src/migration.rs
@@ -34,8 +34,7 @@ pub async fn execute<Runtime: RuntimeT, Block: BlockT + DeserializeOwned>(
 		.mode(Mode::Online(OnlineConfig {
 			transport: ws_url.to_string().into(),
 			pallets: vec![pallet_staking::Pallet::<Runtime>::name().to_string()],
-			at: None,
-			state_snapshot: None,
+			..Default::default()
 		}))
 		.build()
 		.await
diff --git a/substrate/frame/bags-list/remote-tests/src/sanity_check.rs b/substrate/frame/bags-list/remote-tests/src/sanity_check.rs
index adab1ae5477..f2b6881edea 100644
--- a/substrate/frame/bags-list/remote-tests/src/sanity_check.rs
+++ b/substrate/frame/bags-list/remote-tests/src/sanity_check.rs
@@ -35,8 +35,7 @@ pub async fn execute<Runtime: crate::RuntimeT, Block: BlockT + DeserializeOwned>
 		.mode(Mode::Online(OnlineConfig {
 			transport: ws_url.to_string().into(),
 			pallets: vec![pallet_bags_list::Pallet::<Runtime>::name().to_string()],
-			at: None,
-			state_snapshot: None,
+			..Default::default()
 		}))
 		.inject_hashed_prefix(&<pallet_staking::Bonded<Runtime>>::prefix_hash())
 		.inject_hashed_prefix(&<pallet_staking::Ledger<Runtime>>::prefix_hash())
diff --git a/substrate/frame/bags-list/remote-tests/src/snapshot.rs b/substrate/frame/bags-list/remote-tests/src/snapshot.rs
index 78f462e55b8..241b64b3661 100644
--- a/substrate/frame/bags-list/remote-tests/src/snapshot.rs
+++ b/substrate/frame/bags-list/remote-tests/src/snapshot.rs
@@ -35,7 +35,7 @@ pub async fn execute<Runtime: crate::RuntimeT, Block: BlockT + DeserializeOwned>
 			// is bags-list.
 			pallets: vec![pallet_bags_list::Pallet::<Runtime>::name().to_string()],
 			at: None,
-			state_snapshot: None,
+			..Default::default()
 		}))
 		.inject_hashed_prefix(&<pallet_staking::Bonded<Runtime>>::prefix_hash())
 		.inject_hashed_prefix(&<pallet_staking::Ledger<Runtime>>::prefix_hash())
diff --git a/substrate/frame/state-trie-migration/Cargo.toml b/substrate/frame/state-trie-migration/Cargo.toml
new file mode 100644
index 00000000000..fb8bccb52d1
--- /dev/null
+++ b/substrate/frame/state-trie-migration/Cargo.toml
@@ -0,0 +1,57 @@
+[package]
+name = "pallet-state-trie-migration"
+version = "4.0.0-dev"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2021"
+license = "Apache-2.0"
+homepage = "https://substrate.dev"
+repository = "https://github.com/paritytech/substrate/"
+description = "FRAME pallet migration of trie"
+readme = "README.md"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[dependencies]
+scale-info = { version = "2.0.1", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false }
+log = { version = "0.4.14", default-features = false }
+
+sp-std = { default-features = false, path = "../../primitives/std" }
+sp-io = { default-features = false, path = "../../primitives/io" }
+sp-core = { default-features = false, path = "../../primitives/core" }
+sp-runtime = { default-features = false, path = "../../primitives/runtime" }
+
+frame-support = { default-features = false, path = "../support" }
+frame-system = { default-features = false, path = "../system" }
+frame-benchmarking = { default-features = false, path = "../benchmarking", optional = true }
+
+serde = { version = "1.0.133", optional = true }
+thousands = { version = "0.2.0", optional = true }
+remote-externalities = { path = "../../utils/frame/remote-externalities", optional = true }
+zstd = { version = "0.9.0", optional = true }
+
+[dev-dependencies]
+pallet-balances = { path = "../balances" }
+parking_lot = "0.12.0"
+sp-tracing = { path = "../../primitives/tracing" }
+tokio = { version = "1.10", features = ["macros"] }
+
+[features]
+default = ["std"]
+std = [
+	"log/std",
+	"scale-info/std",
+	"codec/std",
+	"frame-benchmarking/std",
+	"frame-support/std",
+	"frame-system/std",
+	"sp-core/std",
+	"sp-io/std",
+	"sp-runtime/std",
+	"sp-std/std"
+]
+runtime-benchmarks = ["frame-benchmarking"]
+try-runtime = ["frame-support/try-runtime"]
+
+remote-test = [ "std", "zstd", "serde", "thousands", "remote-externalities" ]
diff --git a/substrate/frame/state-trie-migration/src/lib.rs b/substrate/frame/state-trie-migration/src/lib.rs
new file mode 100644
index 00000000000..4de130e9ac0
--- /dev/null
+++ b/substrate/frame/state-trie-migration/src/lib.rs
@@ -0,0 +1,1557 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! # Pallet State Trie Migration
+//!
+//! Reads and writes all keys and values in the entire state in a systematic way. This is useful for
+//! upgrading a chain to [`sp-core::StateVersion::V1`], where all keys need to be touched.
+//!
+//! ## Migration Types
+//!
+//! This pallet provides 2 ways to do this, each of which is suited for a particular use-case, and
+//! can be enabled independently.
+//!
+//! ### Auto migration
+//!
+//! This system will try and migrate all keys by continuously using `on_initialize`. It is only
+//! sensible for a relay chain or a solo chain, where going slightly over weight is not a problem.
+//! It can be configured so that the migration takes at most `n` items and tries to not go over `x`
+//! bytes, but the latter is not guaranteed.
+//!
+//! For example, if a chain contains keys of 1 byte size, the `on_initialize` could read up to `x -
+//! 1` bytes from `n` different keys, while the next key is suddenly `:code:`, and there is no way
+//! to bail out of this.
+//!
+//! ### Signed migration
+//!
+//! As a backup, the migration process can be set in motion via signed transactions that basically
+//! say in advance how many items and how many bytes they will consume, and pay for it as well. This
+//! can be a good safe alternative, if the former system is not desirable.
+//!
+//! The (minor) caveat of this approach is that we cannot know in advance how many bytes reading a
+//! certain number of keys will incur. To overcome this, the runtime needs to configure this pallet
+//! with a `SignedDepositPerItem`. This is the per-item deposit that the origin of the signed
+//! migration transactions need to have in their account (on top of the normal fee) and if the size
+//! witness data that they claim is incorrect, this deposit is slashed.
+//!
+//! ---
+//!
+//! Initially, this pallet does not contain any auto migration. They must be manually enabled by the
+//! `ControlOrigin`.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+pub use pallet::*;
+
+const LOG_TARGET: &'static str = "runtime::state-trie-migration";
+
+#[macro_export]
+macro_rules! log {
+	($level:tt, $patter:expr $(, $values:expr)* $(,)?) => {
+		log::$level!(
+			target: crate::LOG_TARGET,
+			concat!("[{:?}] 🤖 ", $patter), frame_system::Pallet::<T>::block_number() $(, $values)*
+		)
+	};
+}
+
+#[frame_support::pallet]
+pub mod pallet {
+	use frame_support::{
+		dispatch::{DispatchErrorWithPostInfo, PostDispatchInfo},
+		ensure,
+		pallet_prelude::*,
+		traits::{Currency, Get},
+	};
+	use frame_system::{self, pallet_prelude::*};
+	use sp_core::storage::well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX;
+	use sp_runtime::{
+		self,
+		traits::{Saturating, Zero},
+	};
+	use sp_std::prelude::*;
+
+	pub(crate) type BalanceOf<T> =
+		<<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
+
+	/// The weight information of this pallet.
+	pub trait WeightInfo {
+		fn process_top_key(x: u32) -> Weight;
+		fn continue_migrate() -> Weight;
+		fn continue_migrate_wrong_witness() -> Weight;
+		fn migrate_custom_top_fail() -> Weight;
+		fn migrate_custom_top_success() -> Weight;
+		fn migrate_custom_child_fail() -> Weight;
+		fn migrate_custom_child_success() -> Weight;
+	}
+
+	impl WeightInfo for () {
+		fn process_top_key(_: u32) -> Weight {
+			1000000
+		}
+		fn continue_migrate() -> Weight {
+			1000000
+		}
+		fn continue_migrate_wrong_witness() -> Weight {
+			1000000
+		}
+		fn migrate_custom_top_fail() -> Weight {
+			1000000
+		}
+		fn migrate_custom_top_success() -> Weight {
+			1000000
+		}
+		fn migrate_custom_child_fail() -> Weight {
+			1000000
+		}
+		fn migrate_custom_child_success() -> Weight {
+			1000000
+		}
+	}
+
+	/// A migration task stored in state.
+	///
+	/// It tracks the last top and child keys read.
+	#[derive(Clone, Encode, Decode, scale_info::TypeInfo, PartialEq, Eq)]
+	#[codec(mel_bound(T: Config))]
+	#[scale_info(skip_type_params(T))]
+	pub struct MigrationTask<T: Config> {
+		/// The last top key that we migrated.
+		///
+		/// If it does not exist, it means that the migration is done and no further keys exist.
+		pub(crate) last_top: Option<Vec<u8>>,
+		/// The last child key that we have processed.
+		///
+		/// This is a child key under the current `self.last_top`.
+		///
+		/// If this is set, no further top keys are processed until the child key migration is
+		/// complete.
+		pub(crate) last_child: Option<Vec<u8>>,
+
+		/// A marker to indicate if the previous tick was a child tree migration or not.
+		pub(crate) prev_tick_child: bool,
+
+		/// Dynamic counter for the number of items that we have processed in this execution from
+		/// the top trie.
+		///
+		/// It is not written to storage.
+		#[codec(skip)]
+		pub(crate) dyn_top_items: u32,
+		/// Dynamic counter for the number of items that we have processed in this execution from
+		/// any child trie.
+		///
+		/// It is not written to storage.
+		#[codec(skip)]
+		pub(crate) dyn_child_items: u32,
+
+		/// Dynamic counter for the byte size of items that we have processed in this
+		/// execution.
+		///
+		/// It is not written to storage.
+		#[codec(skip)]
+		pub(crate) dyn_size: u32,
+
+		/// The total size of the migration, over all executions.
+		///
+		/// This is only kept around for bookkeeping and debugging.
+		pub(crate) size: u32,
+		/// The total count of top keys in the migration, over all executions.
+		///
+		/// This is only kept around for bookkeeping and debugging.
+		pub(crate) top_items: u32,
+		/// The total count of child keys in the migration, over all executions.
+		///
+		/// This is only kept around for bookkeeping and debugging.
+		pub(crate) child_items: u32,
+
+		#[codec(skip)]
+		pub(crate) _ph: sp_std::marker::PhantomData<T>,
+	}
+
+	impl<T: Config> sp_std::fmt::Debug for MigrationTask<T> {
+		fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result {
+			f.debug_struct("MigrationTask")
+				.field(
+					"top",
+					&self.last_top.as_ref().map(|d| sp_core::hexdisplay::HexDisplay::from(d)),
+				)
+				.field(
+					"child",
+					&self.last_child.as_ref().map(|d| sp_core::hexdisplay::HexDisplay::from(d)),
+				)
+				.field("prev_tick_child", &self.prev_tick_child)
+				.field("dyn_top_items", &self.dyn_top_items)
+				.field("dyn_child_items", &self.dyn_child_items)
+				.field("dyn_size", &self.dyn_size)
+				.field("size", &self.size)
+				.field("top_items", &self.top_items)
+				.field("child_items", &self.child_items)
+				.finish()
+		}
+	}
+
+	impl<T: Config> Default for MigrationTask<T> {
+		fn default() -> Self {
+			Self {
+				last_top: Some(Default::default()),
+				last_child: Default::default(),
+				dyn_child_items: Default::default(),
+				dyn_top_items: Default::default(),
+				dyn_size: Default::default(),
+				prev_tick_child: Default::default(),
+				_ph: Default::default(),
+				size: Default::default(),
+				top_items: Default::default(),
+				child_items: Default::default(),
+			}
+		}
+	}
+
+	impl<T: Config> MigrationTask<T> {
+		/// Return true if the task is finished.
+		pub(crate) fn finished(&self) -> bool {
+			self.last_top.is_none() && self.last_child.is_none()
+		}
+
+		/// Check if there's any work left, or if we have exhausted the limits already.
+		fn exhausted(&self, limits: MigrationLimits) -> bool {
+			self.dyn_total_items() >= limits.item || self.dyn_size >= limits.size
+		}
+
+		/// Get the total number of keys affected by the current task.
+		pub(crate) fn dyn_total_items(&self) -> u32 {
+			self.dyn_child_items.saturating_add(self.dyn_top_items)
+		}
+
+		/// Migrate keys until either of the given limits are exhausted, or if no more top keys
+		/// exist.
+		///
+		/// Note that this can return after the **first** migration tick that causes exhaustion,
+		/// specifically in the case of the `size` constrain. The reason for this is that before
+		/// reading a key, we simply cannot know how many bytes it is. In other words, this should
+		/// not be used in any environment where resources are strictly bounded (e.g. a parachain),
+		/// but it is acceptable otherwise (relay chain, offchain workers).
+		pub fn migrate_until_exhaustion(&mut self, limits: MigrationLimits) {
+			log!(debug, "running migrations on top of {:?} until {:?}", self, limits);
+
+			if limits.item.is_zero() || limits.size.is_zero() {
+				// handle this minor edge case, else we would call `migrate_tick` at least once.
+				log!(warn, "limits are zero. stopping");
+				return
+			}
+
+			while !self.exhausted(limits) && !self.finished() {
+				self.migrate_tick();
+			}
+
+			// accumulate dynamic data into the storage items.
+			self.size = self.size.saturating_add(self.dyn_size);
+			self.child_items = self.child_items.saturating_add(self.dyn_child_items);
+			self.top_items = self.top_items.saturating_add(self.dyn_top_items);
+			log!(debug, "finished with {:?}", self);
+		}
+
+		/// Migrate AT MOST ONE KEY. This can be either a top or a child key.
+		///
+		/// This function is *the* core of this entire pallet.
+		fn migrate_tick(&mut self) {
+			match (self.last_top.as_ref(), self.last_child.as_ref()) {
+				(Some(_), Some(_)) => {
+					// we're in the middle of doing work on a child tree.
+					self.migrate_child();
+				},
+				(Some(ref top_key), None) => {
+					// we have a top key and no child key. 3 possibilities exist:
+					// 1. we continue the top key migrations.
+					// 2. this is the root of a child key, and we start processing child keys (and
+					// should call `migrate_child`).
+					// 3. this is the root of a child key, and we are finishing all child-keys (and
+					// should call `migrate_top`).
+
+					// NOTE: this block is written intentionally verbosely for ease of
+					// verification.
+					match (
+						top_key.starts_with(DEFAULT_CHILD_STORAGE_KEY_PREFIX),
+						self.prev_tick_child,
+					) {
+						(false, false) => {
+							// continue the top key migration
+							self.migrate_top();
+						},
+						(true, false) => {
+							self.last_child = Some(Default::default());
+							self.migrate_child();
+							self.prev_tick_child = true;
+						},
+						(true, true) => {
+							// we're done with migrating a child-root.
+							self.prev_tick_child = false;
+							self.migrate_top();
+						},
+						(false, true) => {
+							// should never happen.
+							log!(error, "LOGIC ERROR: unreachable code [0].");
+							Pallet::<T>::halt();
+						},
+					};
+				},
+				(None, Some(_)) => {
+					log!(error, "LOGIC ERROR: unreachable code [1].");
+					Pallet::<T>::halt()
+				},
+				(None, None) => {
+					// nada
+				},
+			}
+		}
+
+		/// Migrate the current child key, setting it to its new value, if one exists.
+		///
+		/// It updates the dynamic counters.
+		fn migrate_child(&mut self) {
+			use sp_io::default_child_storage as child_io;
+			let (last_child, last_top) = match (&self.last_child, &self.last_top) {
+				(Some(last_child), Some(last_top)) => (last_child, last_top),
+				_ => {
+					// defensive: this function is only called when both of these values exist;
+					// not much that we can do otherwise.
+					frame_support::defensive!("cannot migrate child key.");
+					return
+				},
+			};
+
+			let child_root = Pallet::<T>::transform_child_key_or_halt(&last_top);
+			let maybe_current_child = child_io::next_key(child_root, &last_child);
+			if let Some(ref current_child) = maybe_current_child {
+				let added_size = if let Some(data) = child_io::get(child_root, &current_child) {
+					child_io::set(child_root, current_child, &data);
+					data.len() as u32
+				} else {
+					Zero::zero()
+				};
+				self.dyn_size = self.dyn_size.saturating_add(added_size);
+				self.dyn_child_items.saturating_inc();
+			}
+
+			log!(trace, "migrated a child key, next_child_key: {:?}", maybe_current_child);
+			self.last_child = maybe_current_child;
+		}
+
+		/// Migrate the current top key, setting it to its new value, if one exists.
+		///
+		/// Reading and re-writing the value is what moves it into the new trie format.
+		///
+		/// It updates the dynamic counters.
+		fn migrate_top(&mut self) {
+			// The top cursor must be set by the caller.
+			let last_top = match &self.last_top {
+				Some(last_top) => last_top,
+				None => {
+					// defensive: this function is only called when this value exists.
+					// Not much that we can do otherwise..
+					frame_support::defensive!("cannot migrate top key.");
+					return
+				},
+			};
+
+			// Advance the cursor to the next top-level key, if any.
+			let maybe_current_top = sp_io::storage::next_key(last_top);
+			if let Some(ref current_top) = maybe_current_top {
+				let added_size = if let Some(data) = sp_io::storage::get(&current_top) {
+					// re-writing the unchanged value is the migration itself.
+					sp_io::storage::set(&current_top, &data);
+					data.len() as u32
+				} else {
+					Zero::zero()
+				};
+				self.dyn_size = self.dyn_size.saturating_add(added_size);
+				self.dyn_top_items.saturating_inc();
+			}
+
+			log!(trace, "migrated a top key, next_top_key = {:?}", maybe_current_top);
+			// `None` means the top trie is exhausted.
+			self.last_top = maybe_current_top;
+		}
+	}
+
+	/// The limits of a migration.
+	///
+	/// Used to bound a single run of [`MigrationTask::migrate_until_exhaustion`], both for the
+	/// automatic and the signed migrations.
+	#[derive(Clone, Copy, Encode, Decode, scale_info::TypeInfo, Default, Debug, PartialEq, Eq)]
+	pub struct MigrationLimits {
+		/// The byte size limit.
+		pub size: u32,
+		/// The number of keys limit.
+		pub item: u32,
+	}
+
+	/// How a migration was computed.
+	///
+	/// Reported in the [`Event::Migrated`] event.
+	#[derive(Clone, Copy, Encode, Decode, scale_info::TypeInfo, Debug, PartialEq, Eq)]
+	pub enum MigrationCompute {
+		/// A signed origin triggered the migration.
+		Signed,
+		/// An automatic task triggered the migration.
+		Auto,
+	}
+
+	/// Inner events of this pallet.
+	#[pallet::event]
+	#[pallet::generate_deposit(pub(super) fn deposit_event)]
+	pub enum Event<T: Config> {
+		/// Given number of `(top, child)` keys were migrated respectively, with the given
+		/// `compute`.
+		Migrated { top: u32, child: u32, compute: MigrationCompute },
+		/// Some account got slashed by the given amount.
+		///
+		/// This happens when the witness data provided with a signed migration turned out to be
+		/// wrong.
+		Slashed { who: T::AccountId, amount: BalanceOf<T> },
+		/// The auto migration task finished.
+		AutoMigrationFinished,
+		/// Migration got halted.
+		Halted,
+	}
+
+	/// The outer Pallet struct.
+	#[pallet::pallet]
+	#[pallet::generate_store(pub(crate) trait Store)]
+	// storage sizes here are unbounded (raw keys/values), hence no storage info.
+	#[pallet::without_storage_info]
+	pub struct Pallet<T>(_);
+
+	/// Configurations of this pallet.
+	#[pallet::config]
+	pub trait Config: frame_system::Config {
+		/// Origin that can control the configurations of this pallet.
+		type ControlOrigin: frame_support::traits::EnsureOrigin<Self::Origin>;
+
+		/// Filter on which origin that trigger the manual migrations.
+		type SignedFilter: EnsureOrigin<Self::Origin, Success = Self::AccountId>;
+
+		/// The overarching event type.
+		type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>;
+
+		/// The currency provider type.
+		///
+		/// Used for slashing submitters of signed migrations that provide bad witness data.
+		type Currency: Currency<Self::AccountId>;
+
+		/// The amount of deposit collected per item in advance, for signed migrations.
+		///
+		/// This should reflect the average storage value size in the worse case.
+		///
+		/// NOTE(review): unlike `SignedMigrationMaxLimits`, this is not a `#[pallet::constant]`;
+		/// consider exposing it in metadata as well — TODO confirm intent.
+		type SignedDepositPerItem: Get<BalanceOf<Self>>;
+
+		/// The base value of [`Config::SignedDepositPerItem`].
+		///
+		/// Final deposit is `items * SignedDepositPerItem + SignedDepositBase`.
+		type SignedDepositBase: Get<BalanceOf<Self>>;
+
+		/// The maximum limits that the signed migration could use.
+		#[pallet::constant]
+		type SignedMigrationMaxLimits: Get<MigrationLimits>;
+
+		/// The weight information of this pallet.
+		type WeightInfo: WeightInfo;
+	}
+
+	/// Migration progress.
+	///
+	/// This stores the snapshot of the last migrated keys. It can be set into motion and move
+	/// forward by any of the means provided by this pallet.
+	#[pallet::storage]
+	#[pallet::getter(fn migration_process)]
+	pub type MigrationProcess<T> = StorageValue<_, MigrationTask<T>, ValueQuery>;
+
+	/// The limits that are imposed on automatic migrations.
+	///
+	/// If set to None, then no automatic migration happens.
+	// NOTE: `ValueQuery` over an inner `Option` means the un-set default is `None`, i.e. auto
+	// migration is disabled unless explicitly enabled.
+	#[pallet::storage]
+	#[pallet::getter(fn auto_limits)]
+	pub type AutoLimits<T> = StorageValue<_, Option<MigrationLimits>, ValueQuery>;
+
+	/// Dispatch errors of this pallet.
+	#[pallet::error]
+	pub enum Error<T> {
+		/// Max signed limits not respected.
+		MaxSignedLimits,
+		/// Submitter does not have enough funds.
+		NotEnoughFunds,
+		/// Bad witness data provided.
+		BadWitness,
+		/// Upper bound of size is exceeded.
+		SizeUpperBoundExceeded,
+	}
+
+	#[pallet::call]
+	impl<T: Config> Pallet<T> {
+		/// Control the automatic migration.
+		///
+		/// The dispatch origin of this call must be [`Config::ControlOrigin`].
+		#[pallet::weight(T::DbWeight::get().reads_writes(1, 1))]
+		pub fn control_auto_migration(
+			origin: OriginFor<T>,
+			maybe_config: Option<MigrationLimits>,
+		) -> DispatchResultWithPostInfo {
+			T::ControlOrigin::ensure_origin(origin)?;
+			// passing `None` disables the automatic migration.
+			AutoLimits::<T>::put(maybe_config);
+			Ok(().into())
+		}
+
+		/// Continue the migration for the given `limits`.
+		///
+		/// The dispatch origin of this call can be any signed account.
+		///
+		/// This transaction has NO MONETARY INCENTIVES. Calling it will not reward anyone. Albeit,
+		/// upon successful execution, the transaction fee is returned.
+		///
+		/// The (potentially over-estimated) byte length of all the data read must be provided for
+		/// up-front fee-payment and weighing. In essence, the caller is guaranteeing that
+		/// executing the current `MigrationTask` with the given `limits` will not exceed
+		/// `real_size_upper` bytes of read data.
+		///
+		/// The `witness_task` is merely a helper to prevent the caller from being slashed or
+		/// generally trigger a migration that they do not intend. This parameter is just a message
+		/// from caller, saying that they believed `witness_task` was the last state of the
+		/// migration, and they only wish for their transaction to do anything, if this assumption
+		/// holds. In case `witness_task` does not match, the transaction fails.
+		///
+		/// Based on the documentation of [`MigrationTask::migrate_until_exhaustion`], the
+		/// recommended way of doing this is to pass a `limit` that only bounds `count`, as the
+		/// `size` limit can always be overwritten.
+		#[pallet::weight(
+			// the migration process
+			Pallet::<T>::dynamic_weight(limits.item, * real_size_upper)
+			// rest of the operations, like deposit etc.
+			+ T::WeightInfo::continue_migrate()
+		)]
+		pub fn continue_migrate(
+			origin: OriginFor<T>,
+			limits: MigrationLimits,
+			real_size_upper: u32,
+			witness_task: MigrationTask<T>,
+		) -> DispatchResultWithPostInfo {
+			let who = T::SignedFilter::ensure_origin(origin)?;
+
+			let max_limits = T::SignedMigrationMaxLimits::get();
+			ensure!(
+				limits.size <= max_limits.size && limits.item <= max_limits.item,
+				Error::<T>::MaxSignedLimits,
+			);
+
+			// ensure they can pay more than the fee.
+			let deposit = T::SignedDepositPerItem::get().saturating_mul(limits.item.into());
+			ensure!(T::Currency::can_slash(&who, deposit), Error::<T>::NotEnoughFunds);
+
+			let mut task = Self::migration_process();
+			ensure!(
+				task == witness_task,
+				DispatchErrorWithPostInfo {
+					error: Error::<T>::BadWitness.into(),
+					post_info: PostDispatchInfo {
+						actual_weight: Some(T::WeightInfo::continue_migrate_wrong_witness()),
+						pays_fee: Pays::Yes
+					}
+				}
+			);
+			task.migrate_until_exhaustion(limits);
+
+			// ensure that the migration witness data was correct.
+			if real_size_upper < task.dyn_size {
+				// let the imbalance burn.
+				let (_imbalance, _remainder) = T::Currency::slash(&who, deposit);
+				Self::deposit_event(Event::<T>::Slashed { who, amount: deposit });
+				debug_assert!(_remainder.is_zero());
+				return Err(Error::<T>::SizeUpperBoundExceeded.into())
+			}
+
+			Self::deposit_event(Event::<T>::Migrated {
+				top: task.dyn_top_items,
+				child: task.dyn_child_items,
+				compute: MigrationCompute::Signed,
+			});
+
+			// refund and correct the weight.
+			let actual_weight = Some(
+				Pallet::<T>::dynamic_weight(limits.item, task.dyn_size)
+					.saturating_add(T::WeightInfo::continue_migrate()),
+			);
+
+			MigrationProcess::<T>::put(task);
+			Ok((actual_weight, Pays::No).into())
+		}
+
+		/// Migrate the list of top keys by iterating each of them one by one.
+		///
+		/// This does not affect the global migration process tracker ([`MigrationProcess`]), and
+		/// should only be used in case any keys are leftover due to a bug.
+		#[pallet::weight(
+			T::WeightInfo::migrate_custom_top_success()
+				.max(T::WeightInfo::migrate_custom_top_fail())
+			.saturating_add(
+				Pallet::<T>::dynamic_weight(keys.len() as u32, *witness_size)
+			)
+		)]
+		pub fn migrate_custom_top(
+			origin: OriginFor<T>,
+			keys: Vec<Vec<u8>>,
+			witness_size: u32,
+		) -> DispatchResultWithPostInfo {
+			let who = T::SignedFilter::ensure_origin(origin)?;
+
+			// ensure they can pay more than the fee.
+			let deposit = T::SignedDepositBase::get().saturating_add(
+				T::SignedDepositPerItem::get().saturating_mul((keys.len() as u32).into()),
+			);
+			// NOTE(review): a typed error (e.g. `Error::<T>::NotEnoughFunds`) would be more
+			// consistent with `continue_migrate` than a string error.
+			ensure!(T::Currency::can_slash(&who, deposit), "not enough funds");
+
+			let mut dyn_size = 0u32;
+			for key in &keys {
+				if let Some(data) = sp_io::storage::get(&key) {
+					dyn_size = dyn_size.saturating_add(data.len() as u32);
+					// re-writing the unchanged value is the migration itself.
+					sp_io::storage::set(key, &data);
+				}
+			}
+
+			// the witness is allowed to be an over-estimate; only slash under-estimates.
+			if dyn_size > witness_size {
+				let (_imbalance, _remainder) = T::Currency::slash(&who, deposit);
+				Self::deposit_event(Event::<T>::Slashed { who, amount: deposit });
+				debug_assert!(_remainder.is_zero());
+				Err("wrong witness data".into())
+			} else {
+				Self::deposit_event(Event::<T>::Migrated {
+					top: keys.len() as u32,
+					child: 0,
+					compute: MigrationCompute::Signed,
+				});
+				Ok(PostDispatchInfo {
+					actual_weight: Some(
+						T::WeightInfo::migrate_custom_top_success().saturating_add(
+							Pallet::<T>::dynamic_weight(keys.len() as u32, dyn_size),
+						),
+					),
+					pays_fee: Pays::Yes,
+				})
+			}
+		}
+
+		/// Migrate the list of child keys by iterating each of them one by one.
+		///
+		/// All of the given child keys must be present under one `child_root`.
+		///
+		/// This does not affect the global migration process tracker ([`MigrationProcess`]), and
+		/// should only be used in case any keys are leftover due to a bug.
+		#[pallet::weight(
+			T::WeightInfo::migrate_custom_child_success()
+				.max(T::WeightInfo::migrate_custom_child_fail())
+			.saturating_add(
+				Pallet::<T>::dynamic_weight(child_keys.len() as u32, *total_size)
+			)
+		)]
+		pub fn migrate_custom_child(
+			origin: OriginFor<T>,
+			root: Vec<u8>,
+			child_keys: Vec<Vec<u8>>,
+			total_size: u32,
+		) -> DispatchResultWithPostInfo {
+			use sp_io::default_child_storage as child_io;
+			let who = T::SignedFilter::ensure_origin(origin)?;
+
+			// ensure they can pay more than the fee.
+			let deposit = T::SignedDepositBase::get().saturating_add(
+				T::SignedDepositPerItem::get().saturating_mul((child_keys.len() as u32).into()),
+			);
+			ensure!(T::Currency::can_slash(&who, deposit), "not enough funds");
+
+			let mut dyn_size = 0u32;
+			let transformed_child_key = Self::transform_child_key(&root).ok_or("bad child key")?;
+			for child_key in &child_keys {
+				if let Some(data) = child_io::get(transformed_child_key, &child_key) {
+					dyn_size = dyn_size.saturating_add(data.len() as u32);
+					// re-writing the unchanged value is the migration itself.
+					child_io::set(transformed_child_key, &child_key, &data);
+				}
+			}
+
+			// NOTE(review): unlike `migrate_custom_top`, this requires an *exact* witness match
+			// (`!=`), so over-estimates are slashed too — confirm this asymmetry is intended.
+			if dyn_size != total_size {
+				let (_imbalance, _remainder) = T::Currency::slash(&who, deposit);
+				debug_assert!(_remainder.is_zero());
+				Self::deposit_event(Event::<T>::Slashed { who, amount: deposit });
+				Err(DispatchErrorWithPostInfo {
+					error: "bad witness".into(),
+					post_info: PostDispatchInfo {
+						actual_weight: Some(T::WeightInfo::migrate_custom_child_fail()),
+						pays_fee: Pays::Yes,
+					},
+				})
+			} else {
+				Self::deposit_event(Event::<T>::Migrated {
+					top: 0,
+					child: child_keys.len() as u32,
+					compute: MigrationCompute::Signed,
+				});
+				Ok(PostDispatchInfo {
+					actual_weight: Some(
+						T::WeightInfo::migrate_custom_child_success().saturating_add(
+							Pallet::<T>::dynamic_weight(child_keys.len() as u32, total_size),
+						),
+					),
+					pays_fee: Pays::Yes,
+				})
+			}
+		}
+	}
+
+	#[pallet::hooks]
+	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
+		/// Drive the automatic migration forward by one step, if [`AutoLimits`] is set.
+		///
+		/// Returns the weight actually consumed by the migration, derived from what the task
+		/// processed.
+		fn on_initialize(_: BlockNumberFor<T>) -> Weight {
+			if let Some(limits) = Self::auto_limits() {
+				let mut task = Self::migration_process();
+				task.migrate_until_exhaustion(limits);
+				let weight = Self::dynamic_weight(task.dyn_total_items(), task.dyn_size);
+
+				log!(
+					info,
+					"migrated {} top keys, {} child keys, and a total of {} bytes.",
+					task.dyn_top_items,
+					task.dyn_child_items,
+					task.dyn_size,
+				);
+
+				if task.finished() {
+					// migration is over: stop the automatic task.
+					Self::deposit_event(Event::<T>::AutoMigrationFinished);
+					AutoLimits::<T>::kill();
+				} else {
+					Self::deposit_event(Event::<T>::Migrated {
+						top: task.dyn_top_items,
+						child: task.dyn_child_items,
+						compute: MigrationCompute::Auto,
+					});
+				}
+
+				// persist the cursor for the next block.
+				MigrationProcess::<T>::put(task);
+
+				weight
+			} else {
+				// only the `AutoLimits` read happened.
+				T::DbWeight::get().reads(1)
+			}
+		}
+	}
+
+	impl<T: Config> Pallet<T> {
+		/// The real weight of a migration of the given number of `items` with total `size`.
+		fn dynamic_weight(items: u32, size: u32) -> frame_support::pallet_prelude::Weight {
+			let items = items as Weight;
+			items
+				// one read + one write per migrated item.
+				.saturating_mul(<T as frame_system::Config>::DbWeight::get().reads_writes(1, 1))
+				// we assume that the read/write per-byte weight is the same for child and top tree.
+				.saturating_add(T::WeightInfo::process_top_key(size))
+		}
+
+		/// Put a stop to all ongoing migrations.
+		fn halt() {
+			AutoLimits::<T>::kill();
+			Self::deposit_event(Event::<T>::Halted);
+		}
+
+		/// Convert a child root key, aka. "Child-bearing top key" into the proper format.
+		///
+		/// Returns `None` if `root` is not a well-formed default child root key.
+		fn transform_child_key(root: &Vec<u8>) -> Option<&[u8]> {
+			use sp_core::storage::{ChildType, PrefixedStorageKey};
+			match ChildType::from_prefixed_key(PrefixedStorageKey::new_ref(root)) {
+				Some((ChildType::ParentKeyId, root)) => Some(root),
+				_ => None,
+			}
+		}
+
+		/// Same as [`Self::transform_child_key`], and it halts the auto/unsigned migrations if a
+		/// bad child root is used.
+		///
+		/// This should be used when we are sure that `root` is a correct default child root.
+		fn transform_child_key_or_halt(root: &Vec<u8>) -> &[u8] {
+			let key = Self::transform_child_key(root);
+			if key.is_none() {
+				Self::halt();
+			}
+			// defensive: fall back to an empty key after halting.
+			key.unwrap_or_default()
+		}
+
+		/// Convert a child root to be in the default child-tree.
+		#[cfg(any(test, feature = "runtime-benchmarks"))]
+		pub(crate) fn childify(root: &'static str) -> Vec<u8> {
+			let mut string = DEFAULT_CHILD_STORAGE_KEY_PREFIX.to_vec();
+			string.extend_from_slice(root.as_ref());
+			string
+		}
+	}
+}
+
+#[cfg(feature = "runtime-benchmarks")]
+mod benchmarks {
+	use super::{pallet::Pallet as StateTrieMigration, *};
+	use frame_support::traits::{Currency, Get};
+	use sp_runtime::traits::Saturating;
+	use sp_std::prelude::*;
+
+	// The size of the key seemingly makes no difference in the read/write time, so we make it
+	// constant.
+	const KEY: &[u8] = b"key";
+
+	frame_benchmarking::benchmarks! {
+		continue_migrate {
+			// note that this benchmark should migrate nothing, as we only want the overhead weight
+			// of the bookkeeping, and the migration cost itself is noted via the `dynamic_weight`
+			// function.
+			let null = MigrationLimits::default();
+			let caller = frame_benchmarking::whitelisted_caller();
+		}: _(frame_system::RawOrigin::Signed(caller), null, 0, StateTrieMigration::<T>::migration_process())
+		verify {
+			assert_eq!(StateTrieMigration::<T>::migration_process(), Default::default())
+		}
+
+		continue_migrate_wrong_witness {
+			let null = MigrationLimits::default();
+			let caller = frame_benchmarking::whitelisted_caller();
+			// any task other than the default one is a wrong witness.
+			let bad_witness = MigrationTask { last_top: Some(vec![1u8]), ..Default::default() };
+		}: {
+			assert!(
+				StateTrieMigration::<T>::continue_migrate(
+					frame_system::RawOrigin::Signed(caller).into(),
+					null,
+					0,
+					bad_witness,
+				)
+				.is_err()
+			)
+		}
+		verify {
+			assert_eq!(StateTrieMigration::<T>::migration_process(), Default::default())
+		}
+
+		migrate_custom_top_success {
+			let caller = frame_benchmarking::whitelisted_caller();
+			// the caller must be funded enough to cover the deposit.
+			let deposit = T::SignedDepositBase::get().saturating_add(
+				T::SignedDepositPerItem::get().saturating_mul(1u32.into()),
+			);
+			let stash = T::Currency::minimum_balance() * BalanceOf::<T>::from(1000u32) + deposit;
+			T::Currency::make_free_balance_be(&caller, stash);
+		}: migrate_custom_top(frame_system::RawOrigin::Signed(caller.clone()), Default::default(), 0)
+		verify {
+			assert_eq!(StateTrieMigration::<T>::migration_process(), Default::default());
+			assert_eq!(T::Currency::free_balance(&caller), stash)
+		}
+
+		migrate_custom_top_fail {
+			let caller = frame_benchmarking::whitelisted_caller();
+			let deposit = T::SignedDepositBase::get().saturating_add(
+				T::SignedDepositPerItem::get().saturating_mul(1u32.into()),
+			);
+			let stash = T::Currency::minimum_balance() * BalanceOf::<T>::from(1000u32) + deposit;
+			T::Currency::make_free_balance_be(&caller, stash);
+			// for tests, we need to make sure there is _something_ in storage that is being
+			// migrated.
+			sp_io::storage::set(b"foo", vec![1u8;33].as_ref());
+		}: {
+			assert!(
+				StateTrieMigration::<T>::migrate_custom_top(
+					frame_system::RawOrigin::Signed(caller.clone()).into(),
+					vec![b"foo".to_vec()],
+					1,
+				).is_err()
+			)
+		}
+		verify {
+			assert_eq!(StateTrieMigration::<T>::migration_process(), Default::default());
+			// must have gotten slashed
+			assert!(T::Currency::free_balance(&caller) < stash)
+		}
+
+		migrate_custom_child_success {
+			let caller = frame_benchmarking::whitelisted_caller();
+			let deposit = T::SignedDepositBase::get().saturating_add(
+				T::SignedDepositPerItem::get().saturating_mul(1u32.into()),
+			);
+			let stash = T::Currency::minimum_balance() * BalanceOf::<T>::from(1000u32) + deposit;
+			T::Currency::make_free_balance_be(&caller, stash);
+		}: migrate_custom_child(
+			frame_system::RawOrigin::Signed(caller.clone()),
+			StateTrieMigration::<T>::childify(Default::default()),
+			Default::default(),
+			0
+		)
+		verify {
+			assert_eq!(StateTrieMigration::<T>::migration_process(), Default::default());
+			assert_eq!(T::Currency::free_balance(&caller), stash);
+		}
+
+		migrate_custom_child_fail {
+			let caller = frame_benchmarking::whitelisted_caller();
+			let deposit = T::SignedDepositBase::get().saturating_add(
+				T::SignedDepositPerItem::get().saturating_mul(1u32.into()),
+			);
+			let stash = T::Currency::minimum_balance() * BalanceOf::<T>::from(1000u32) + deposit;
+			T::Currency::make_free_balance_be(&caller, stash);
+			// for tests, we need to make sure there is _something_ in storage that is being
+			// migrated.
+			sp_io::default_child_storage::set(b"top", b"foo", vec![1u8;33].as_ref());
+		}: {
+			assert!(
+				StateTrieMigration::<T>::migrate_custom_child(
+					frame_system::RawOrigin::Signed(caller.clone()).into(),
+					StateTrieMigration::<T>::childify("top"),
+					vec![b"foo".to_vec()],
+					1,
+				).is_err()
+			)
+		}
+		verify {
+			assert_eq!(StateTrieMigration::<T>::migration_process(), Default::default());
+			// must have gotten slashed
+			assert!(T::Currency::free_balance(&caller) < stash)
+		}
+
+		process_top_key {
+			let v in 1 .. (4 * 1024 * 1024);
+
+			let value = sp_std::vec![1u8; v as usize];
+			sp_io::storage::set(KEY, &value);
+		}: {
+			// mimic one step of the migration: read, re-write, advance the cursor.
+			let data = sp_io::storage::get(KEY).unwrap();
+			sp_io::storage::set(KEY, &data);
+			let _next = sp_io::storage::next_key(KEY);
+			assert_eq!(data, value);
+		}
+
+		impl_benchmark_test_suite!(
+			StateTrieMigration,
+			crate::mock::new_test_ext(sp_runtime::StateVersion::V0, true, None, None),
+			crate::mock::Test
+		);
+	}
+}
+
+#[cfg(test)]
+mod mock {
+	use super::*;
+	use crate as pallet_state_trie_migration;
+	use frame_support::{parameter_types, traits::Hooks};
+	use frame_system::{EnsureRoot, EnsureSigned};
+	use sp_core::{
+		storage::{ChildInfo, StateVersion},
+		H256,
+	};
+	use sp_runtime::{
+		traits::{BlakeTwo256, Header as _, IdentityLookup},
+		StorageChild,
+	};
+
+	type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
+	type Block = frame_system::mocking::MockBlock<Test>;
+
+	// Configure a mock runtime to test the pallet.
+	frame_support::construct_runtime!(
+		pub enum Test where
+			Block = Block,
+			NodeBlock = Block,
+			UncheckedExtrinsic = UncheckedExtrinsic,
+		{
+			System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
+			Balances: pallet_balances::{Pallet, Call, Config<T>, Storage, Event<T>},
+			StateTrieMigration: pallet_state_trie_migration::{Pallet, Call, Storage, Event<T>},
+		}
+	);
+
+	parameter_types! {
+		pub const BlockHashCount: u32 = 250;
+		pub const SS58Prefix: u8 = 42;
+	}
+
+	impl frame_system::Config for Test {
+		type BaseCallFilter = frame_support::traits::Everything;
+		type BlockWeights = ();
+		type BlockLength = ();
+		type Origin = Origin;
+		type Call = Call;
+		type Index = u64;
+		type BlockNumber = u32;
+		type Hash = H256;
+		type Hashing = BlakeTwo256;
+		type AccountId = u64;
+		type Lookup = IdentityLookup<Self::AccountId>;
+		type Header = sp_runtime::generic::Header<Self::BlockNumber, BlakeTwo256>;
+		type Event = Event;
+		type BlockHashCount = BlockHashCount;
+		type DbWeight = ();
+		type Version = ();
+		type PalletInfo = PalletInfo;
+		type AccountData = pallet_balances::AccountData<u64>;
+		type OnNewAccount = ();
+		type OnKilledAccount = ();
+		type SystemWeightInfo = ();
+		type SS58Prefix = SS58Prefix;
+		type OnSetCode = ();
+		type MaxConsumers = frame_support::traits::ConstU32<16>;
+	}
+
+	parameter_types! {
+		pub const ExistentialDeposit: u64 = 1;
+		pub const OffchainRepeat: u32 = 1;
+		pub const SignedDepositPerItem: u64 = 1;
+		pub const SignedDepositBase: u64 = 5;
+		pub const SignedMigrationMaxLimits: MigrationLimits = MigrationLimits { size: 1024, item: 5 };
+	}
+
+	impl pallet_balances::Config for Test {
+		type Balance = u64;
+		type Event = Event;
+		type DustRemoval = ();
+		type ExistentialDeposit = ExistentialDeposit;
+		type AccountStore = System;
+		type MaxLocks = ();
+		type MaxReserves = ();
+		type ReserveIdentifier = [u8; 8];
+		type WeightInfo = ();
+	}
+
+	impl pallet_state_trie_migration::Config for Test {
+		type Event = Event;
+		type ControlOrigin = EnsureRoot<u64>;
+		type Currency = Balances;
+		type SignedDepositPerItem = SignedDepositPerItem;
+		type SignedDepositBase = SignedDepositBase;
+		type SignedMigrationMaxLimits = SignedMigrationMaxLimits;
+		type SignedFilter = EnsureSigned<Self::AccountId>;
+		type WeightInfo = ();
+	}
+
+	/// Build test externalities with the given trie state `version`.
+	///
+	/// `with_pallets` additionally writes the system/balances genesis. `custom_keys` and
+	/// `custom_child` extend the default top and child key sets, respectively (`custom_child`
+	/// entries are `(child_root, key, value)`).
+	pub fn new_test_ext(
+		version: StateVersion,
+		with_pallets: bool,
+		custom_keys: Option<Vec<(Vec<u8>, Vec<u8>)>>,
+		custom_child: Option<Vec<(Vec<u8>, Vec<u8>, Vec<u8>)>>,
+	) -> sp_io::TestExternalities {
+		// all default values are just above the inline-value threshold, so each one is affected
+		// by the migration.
+		let minimum_size = sp_core::storage::TRIE_VALUE_NODE_THRESHOLD as usize + 1;
+		let mut custom_storage = sp_core::storage::Storage {
+			top: vec![
+				(b"key1".to_vec(), vec![1u8; minimum_size + 1]), // 6b657931
+				(b"key2".to_vec(), vec![1u8; minimum_size + 2]), // 6b657932
+				(b"key3".to_vec(), vec![1u8; minimum_size + 3]), // 6b657933
+				(b"key4".to_vec(), vec![1u8; minimum_size + 4]), // 6b657934
+				(b"key5".to_vec(), vec![1u8; minimum_size + 5]), // 6b657935
+				(b"key6".to_vec(), vec![1u8; minimum_size + 6]), // 6b657936
+				(b"key7".to_vec(), vec![1u8; minimum_size + 7]), // 6b657937
+				(b"key8".to_vec(), vec![1u8; minimum_size + 8]), // 6b657938
+				(b"key9".to_vec(), vec![1u8; minimum_size + 9]), // 6b657939
+				(b"CODE".to_vec(), vec![1u8; minimum_size + 100]), // 434f4445
+			]
+			.into_iter()
+			.chain(custom_keys.unwrap_or_default())
+			.collect(),
+			children_default: vec![
+				(
+					b"chk1".to_vec(), // 63686b31
+					StorageChild {
+						data: vec![
+							(b"key1".to_vec(), vec![1u8; 55]),
+							(b"key2".to_vec(), vec![2u8; 66]),
+						]
+						.into_iter()
+						.collect(),
+						child_info: ChildInfo::new_default(b"chk1"),
+					},
+				),
+				(
+					b"chk2".to_vec(),
+					StorageChild {
+						data: vec![
+							(b"key1".to_vec(), vec![1u8; 54]),
+							(b"key2".to_vec(), vec![2u8; 64]),
+						]
+						.into_iter()
+						.collect(),
+						child_info: ChildInfo::new_default(b"chk2"),
+					},
+				),
+			]
+			.into_iter()
+			.chain(
+				custom_child
+					.unwrap_or_default()
+					.into_iter()
+					.map(|(r, k, v)| {
+						(
+							r.clone(),
+							StorageChild {
+								data: vec![(k, v)].into_iter().collect(),
+								child_info: ChildInfo::new_default(&r),
+							},
+						)
+					})
+					.collect::<Vec<_>>(),
+			)
+			.collect(),
+		};
+
+		if with_pallets {
+			frame_system::GenesisConfig::default()
+				.assimilate_storage::<Test>(&mut custom_storage)
+				.unwrap();
+			pallet_balances::GenesisConfig::<Test> { balances: vec![(1, 1000)] }
+				.assimilate_storage(&mut custom_storage)
+				.unwrap();
+		}
+
+		sp_tracing::try_init_simple();
+		(custom_storage, version).into()
+	}
+
+	/// Execute blocks until block number `n`, returning the final state root and the total
+	/// weight consumed by this pallet's `on_initialize`.
+	pub(crate) fn run_to_block(n: u32) -> (H256, u64) {
+		let mut root = Default::default();
+		let mut weight_sum = 0;
+		log::trace!(target: LOG_TARGET, "running from {:?} to {:?}", System::block_number(), n);
+		while System::block_number() < n {
+			System::set_block_number(System::block_number() + 1);
+			System::on_initialize(System::block_number());
+
+			weight_sum += StateTrieMigration::on_initialize(System::block_number());
+
+			root = System::finalize().state_root().clone();
+			System::on_finalize(System::block_number());
+		}
+		(root, weight_sum)
+	}
+}
+
+#[cfg(test)]
+mod test {
+	use super::{mock::*, *};
+	use sp_runtime::{traits::Bounded, StateVersion};
+
+	#[test]
+	fn fails_if_no_migration() {
+		// Run the same chain under both state versions without ever enabling the migration,
+		// and capture the resulting state roots.
+		let compute_root = |version| {
+			new_test_ext(version, false, None, None).execute_with(|| run_to_block(30).0)
+		};
+		let v0_root = compute_root(StateVersion::V0);
+		let v1_root = compute_root(StateVersion::V1);
+
+		// without a migration, the V0 and V1 roots must diverge.
+		assert_ne!(v0_root, v1_root);
+	}
+
+	#[test]
+	#[ignore]
+	fn detects_value_in_empty_top_key() {
+		// edge case: a value stored under the empty (zero-length) top key must also be migrated.
+		let limit = MigrationLimits { item: 1, size: 1000 };
+		let initial_keys = Some(vec![(vec![], vec![66u8; 77])]);
+		let mut ext = new_test_ext(StateVersion::V0, false, initial_keys.clone(), None);
+
+		let root_upgraded = ext.execute_with(|| {
+			// NOTE(review): this re-sets a key that `initial_keys` already placed in genesis —
+			// presumably intentional to refresh it inside the externalities; confirm.
+			sp_io::storage::set(&[], &vec![66u8; 77]);
+
+			AutoLimits::<Test>::put(Some(limit));
+			let root = run_to_block(30).0;
+
+			// eventually everything is over.
+			assert!(StateTrieMigration::migration_process().finished());
+			root
+		});
+
+		// a chain that was born on V1 with the same data must end up with the same root.
+		let mut ext2 = new_test_ext(StateVersion::V1, false, initial_keys, None);
+		let root = ext2.execute_with(|| {
+			AutoLimits::<Test>::put(Some(limit));
+			run_to_block(30).0
+		});
+
+		assert_eq!(root, root_upgraded);
+	}
+
+	#[test]
+	#[ignore]
+	fn detects_value_in_first_child_key() {
+		use frame_support::storage::child;
+		// edge case: a value stored under the empty key of a child trie must also be migrated.
+		let limit = MigrationLimits { item: 1, size: 1000 };
+		let initial_child = Some(vec![(b"chk1".to_vec(), vec![], vec![66u8; 77])]);
+		let mut ext = new_test_ext(StateVersion::V0, false, None, initial_child.clone());
+
+		let root_upgraded = ext.execute_with(|| {
+			AutoLimits::<Test>::put(Some(limit));
+			let root = run_to_block(30).0;
+
+			// eventually everything is over.
+			assert!(StateTrieMigration::migration_process().finished());
+			root
+		});
+
+		// a chain that was born on V1 with the same data must end up with the same root.
+		let mut ext2 = new_test_ext(StateVersion::V1, false, None, initial_child);
+		let root = ext2.execute_with(|| {
+			child::put(&child::ChildInfo::new_default(b"chk1"), &[], &vec![66u8; 77]);
+			AutoLimits::<Test>::put(Some(limit));
+			run_to_block(30).0
+		});
+
+		assert_eq!(root, root_upgraded);
+	}
+
+	#[test]
+	fn auto_migrate_works() {
+		// run the auto-migration with the given `limit` between blocks `from` and `until`, and
+		// check the final root matches a chain that was born on V1.
+		let run_with_limits = |limit, from, until| {
+			let mut ext = new_test_ext(StateVersion::V0, false, None, None);
+			let root_upgraded = ext.execute_with(|| {
+				assert_eq!(AutoLimits::<Test>::get(), None);
+				assert_eq!(MigrationProcess::<Test>::get(), Default::default());
+
+				// nothing happens if we don't set the limits.
+				let _ = run_to_block(from);
+				assert_eq!(MigrationProcess::<Test>::get(), Default::default());
+
+				// this should allow 1 item per block to be migrated.
+				AutoLimits::<Test>::put(Some(limit));
+
+				let root = run_to_block(until).0;
+
+				// eventually everything is over.
+				assert!(matches!(
+					StateTrieMigration::migration_process(),
+					MigrationTask { last_child: None, last_top: None, .. }
+				));
+				root
+			});
+
+			let mut ext2 = new_test_ext(StateVersion::V1, false, None, None);
+			let root = ext2.execute_with(|| {
+				// update ex2 to contain the new items
+				let _ = run_to_block(from);
+				AutoLimits::<Test>::put(Some(limit));
+				run_to_block(until).0
+			});
+			assert_eq!(root, root_upgraded);
+		};
+
+		// single item
+		run_with_limits(MigrationLimits { item: 1, size: 1000 }, 10, 100);
+		// multi-item
+		run_with_limits(MigrationLimits { item: 5, size: 1000 }, 10, 100);
+		// multi-item, based on size. Note that largest value is 100 bytes.
+		run_with_limits(MigrationLimits { item: 1000, size: 128 }, 10, 100);
+		// unbounded
+		run_with_limits(
+			MigrationLimits { item: Bounded::max_value(), size: Bounded::max_value() },
+			10,
+			100,
+		);
+	}
+
+	#[test]
+	fn signed_migrate_works() {
+		new_test_ext(StateVersion::V0, true, None, None).execute_with(|| {
+			assert_eq!(MigrationProcess::<Test>::get(), Default::default());
+
+			// can't submit if limit is too high (exceeds `SignedMigrationMaxLimits`).
+			frame_support::assert_err!(
+				StateTrieMigration::continue_migrate(
+					Origin::signed(1),
+					MigrationLimits { item: 5, size: sp_runtime::traits::Bounded::max_value() },
+					Bounded::max_value(),
+					MigrationProcess::<Test>::get()
+				),
+				Error::<Test>::MaxSignedLimits,
+			);
+
+			// can't submit if poor (account 2 has no genesis balance).
+			frame_support::assert_err!(
+				StateTrieMigration::continue_migrate(
+					Origin::signed(2),
+					MigrationLimits { item: 5, size: 100 },
+					100,
+					MigrationProcess::<Test>::get()
+				),
+				Error::<Test>::NotEnoughFunds,
+			);
+
+			// can't submit with bad witness.
+			frame_support::assert_err_ignore_postinfo!(
+				StateTrieMigration::continue_migrate(
+					Origin::signed(1),
+					MigrationLimits { item: 5, size: 100 },
+					100,
+					MigrationTask { last_top: Some(vec![1u8]), ..Default::default() }
+				),
+				Error::<Test>::BadWitness
+			);
+
+			// migrate all keys in a series of submissions
+			while !MigrationProcess::<Test>::get().finished() {
+				// first we compute the task to get the accurate consumption.
+				let mut task = StateTrieMigration::migration_process();
+				task.migrate_until_exhaustion(SignedMigrationMaxLimits::get());
+
+				// re-submit with the exact dynamic size as witness — must not be slashed.
+				frame_support::assert_ok!(StateTrieMigration::continue_migrate(
+					Origin::signed(1),
+					SignedMigrationMaxLimits::get(),
+					task.dyn_size,
+					MigrationProcess::<Test>::get()
+				));
+
+				// no funds should remain reserved.
+				assert_eq!(Balances::reserved_balance(&1), 0);
+
+				// and the task should be updated
+				assert!(matches!(
+					StateTrieMigration::migration_process(),
+					MigrationTask { size: x, .. } if x > 0,
+				));
+			}
+		});
+	}
+
+	#[test]
+	fn custom_migrate_top_works() {
+		let correct_witness = 3 + sp_core::storage::TRIE_VALUE_NODE_THRESHOLD * 3 + 1 + 2 + 3;
+		new_test_ext(StateVersion::V0, true, None, None).execute_with(|| {
+			frame_support::assert_ok!(StateTrieMigration::migrate_custom_top(
+				Origin::signed(1),
+				vec![b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()],
+				correct_witness,
+			));
+
+			// no funds should remain reserved.
+			assert_eq!(Balances::reserved_balance(&1), 0);
+			assert_eq!(Balances::free_balance(&1), 1000);
+		});
+
+		new_test_ext(StateVersion::V0, true, None, None).execute_with(|| {
+			// works if the witness is an overestimate
+			frame_support::assert_ok!(StateTrieMigration::migrate_custom_top(
+				Origin::signed(1),
+				vec![b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()],
+				correct_witness + 99,
+			));
+
+			// no funds should remain reserved.
+			assert_eq!(Balances::reserved_balance(&1), 0);
+			assert_eq!(Balances::free_balance(&1), 1000);
+		});
+
+		new_test_ext(StateVersion::V0, true, None, None).execute_with(|| {
+			assert_eq!(Balances::free_balance(&1), 1000);
+
+			// note that we don't expect this to be a noop -- we do slash.
+			frame_support::assert_err!(
+				StateTrieMigration::migrate_custom_top(
+					Origin::signed(1),
+					vec![b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()],
+					correct_witness - 1,
+				),
+				"wrong witness data"
+			);
+
+			// no funds should remain reserved.
+			assert_eq!(Balances::reserved_balance(&1), 0);
+			assert_eq!(
+				Balances::free_balance(&1),
+				1000 - (3 * SignedDepositPerItem::get() + SignedDepositBase::get())
+			);
+		});
+	}
+
+	#[test]
+	fn custom_migrate_child_works() {
+		new_test_ext(StateVersion::V0, true, None, None).execute_with(|| {
+			frame_support::assert_ok!(StateTrieMigration::migrate_custom_child(
+				Origin::signed(1),
+				StateTrieMigration::childify("chk1"),
+				vec![b"key1".to_vec(), b"key2".to_vec()],
+				55 + 66,
+			));
+
+			// no funds should remain reserved.
+			assert_eq!(Balances::reserved_balance(&1), 0);
+			assert_eq!(Balances::free_balance(&1), 1000);
+		});
+
+		new_test_ext(StateVersion::V0, true, None, None).execute_with(|| {
+			assert_eq!(Balances::free_balance(&1), 1000);
+
+			// note that we don't expect this to be a noop -- we do slash.
+			assert!(StateTrieMigration::migrate_custom_child(
+				Origin::signed(1),
+				StateTrieMigration::childify("chk1"),
+				vec![b"key1".to_vec(), b"key2".to_vec()],
+				999999, // wrong witness
+			)
+			.is_err());
+
+			// no funds should remain reserved.
+			assert_eq!(Balances::reserved_balance(&1), 0);
+			assert_eq!(
+				Balances::free_balance(&1),
+				1000 - (2 * SignedDepositPerItem::get() + SignedDepositBase::get())
+			);
+		});
+	}
+}
+
+/// Exported set of tests to be called against different runtimes.
+#[cfg(feature = "remote-test")]
+pub(crate) mod remote_tests {
+	use crate::{AutoLimits, MigrationLimits, Pallet as StateTrieMigration, LOG_TARGET};
+	use codec::Encode;
+	use frame_benchmarking::Zero;
+	use frame_support::traits::{Get, Hooks};
+	use frame_system::Pallet as System;
+	use remote_externalities::Mode;
+	use sp_core::H256;
+	use sp_runtime::traits::{Block as BlockT, HashFor, Header as _, One};
+	use thousands::Separable;
+
+	fn run_to_block<Runtime: crate::Config<Hash = H256>>(
+		n: <Runtime as frame_system::Config>::BlockNumber,
+	) -> (H256, u64) {
+		let mut root = Default::default();
+		let mut weight_sum = 0;
+		while System::<Runtime>::block_number() < n {
+			System::<Runtime>::set_block_number(System::<Runtime>::block_number() + One::one());
+			System::<Runtime>::on_initialize(System::<Runtime>::block_number());
+
+			weight_sum +=
+				StateTrieMigration::<Runtime>::on_initialize(System::<Runtime>::block_number());
+
+			root = System::<Runtime>::finalize().state_root().clone();
+			System::<Runtime>::on_finalize(System::<Runtime>::block_number());
+		}
+		(root, weight_sum)
+	}
+
+	/// Run the entire migration, against the given `Runtime`, until completion.
+	///
+	/// This will print some very useful statistics, make sure [`crate::LOG_TARGET`] is enabled.
+	pub(crate) async fn run_with_limits<
+		Runtime: crate::Config<Hash = H256>,
+		Block: BlockT<Hash = H256> + serde::de::DeserializeOwned,
+	>(
+		limits: MigrationLimits,
+		mode: Mode<Block>,
+	) {
+		let mut ext = remote_externalities::Builder::<Block>::new()
+			.mode(mode)
+			.state_version(sp_core::storage::StateVersion::V0)
+			.build()
+			.await
+			.unwrap();
+
+		let mut now = ext.execute_with(|| {
+			AutoLimits::<Runtime>::put(Some(limits));
+			// requires the block number type in our tests to be the same as on mainnet, u32.
+			frame_system::Pallet::<Runtime>::block_number()
+		});
+
+		let mut duration: <Runtime as frame_system::Config>::BlockNumber = Zero::zero();
+		// set the version to 1, as if the upgrade happened.
+		ext.state_version = sp_core::storage::StateVersion::V1;
+
+		let (top_left, child_left) = ext.as_backend().essence().check_migration_state().unwrap();
+		assert!(
+			top_left > 0,
+			"no node needs migrating, this probably means that state was initialized with `StateVersion::V1`",
+		);
+
+		log::info!(
+			target: LOG_TARGET,
+			"initial check: top_left: {}, child_left: {}",
+			top_left.separate_with_commas(),
+			child_left.separate_with_commas(),
+		);
+
+		loop {
+			let last_state_root = ext.backend.root().clone();
+			let ((finished, weight), proof) = ext.execute_and_prove(|| {
+				let weight = run_to_block::<Runtime>(now + One::one()).1;
+				if StateTrieMigration::<Runtime>::migration_process().finished() {
+					return (true, weight)
+				}
+				duration += One::one();
+				now += One::one();
+				(false, weight)
+			});
+
+			let compact_proof =
+				proof.clone().into_compact_proof::<HashFor<Block>>(last_state_root).unwrap();
+			log::info!(
+				target: LOG_TARGET,
+				"proceeded to #{}, weight: [{} / {}], proof: [{} / {} / {}]",
+				now,
+				weight.separate_with_commas(),
+				<Runtime as frame_system::Config>::BlockWeights::get()
+					.max_block
+					.separate_with_commas(),
+				proof.encoded_size().separate_with_commas(),
+				compact_proof.encoded_size().separate_with_commas(),
+				zstd::stream::encode_all(&compact_proof.encode()[..], 0)
+					.unwrap()
+					.len()
+					.separate_with_commas(),
+			);
+			ext.commit_all().unwrap();
+
+			if finished {
+				break
+			}
+		}
+
+		ext.execute_with(|| {
+			log::info!(
+				target: LOG_TARGET,
+				"finished on_initialize migration in {} block, final state of the task: {:?}",
+				duration,
+				StateTrieMigration::<Runtime>::migration_process(),
+			)
+		});
+
+		let (top_left, child_left) = ext.as_backend().essence().check_migration_state().unwrap();
+		assert_eq!(top_left, 0);
+		assert_eq!(child_left, 0);
+	}
+}
+
+#[cfg(all(test, feature = "remote-test"))]
+mod remote_tests_local {
+	use super::{
+		mock::{Call as MockCall, *},
+		remote_tests::run_with_limits,
+		*,
+	};
+	use remote_externalities::{Mode, OfflineConfig, OnlineConfig};
+	use sp_runtime::traits::Bounded;
+
+	// we only use the hash type from this, so using the mock should be fine.
+	type Extrinsic = sp_runtime::testing::TestXt<MockCall, ()>;
+	type Block = sp_runtime::testing::Block<Extrinsic>;
+
+	#[tokio::test]
+	async fn on_initialize_migration() {
+		sp_tracing::try_init_simple();
+		let mode = Mode::OfflineOrElseOnline(
+			OfflineConfig { state_snapshot: env!("SNAP").to_owned().into() },
+			OnlineConfig {
+				transport: std::env!("WS_API").to_owned().into(),
+				state_snapshot: Some(env!("SNAP").to_owned().into()),
+				..Default::default()
+			},
+		);
+
+		// item being the bottleneck
+		run_with_limits::<Test, Block>(
+			MigrationLimits { item: 8 * 1024, size: 128 * 1024 * 1024 },
+			mode.clone(),
+		)
+		.await;
+		// size being the bottleneck
+		run_with_limits::<Test, Block>(
+			MigrationLimits { item: Bounded::max_value(), size: 64 * 1024 },
+			mode,
+		)
+		.await;
+	}
+}
diff --git a/substrate/frame/state-trie-migration/src/weights.rs b/substrate/frame/state-trie-migration/src/weights.rs
new file mode 100644
index 00000000000..f08b115378f
--- /dev/null
+++ b/substrate/frame/state-trie-migration/src/weights.rs
@@ -0,0 +1,137 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for pallet_state_trie_migration
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2022-03-04, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024
+
+// Executed Command:
+// target/production/substrate
+// benchmark
+// --chain=dev
+// --steps=50
+// --repeat=20
+// --pallet=pallet_state_trie_migration
+// --extrinsic=*
+// --execution=wasm
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --output=./frame/state-trie-migration/src/weights.rs
+// --template=./.maintain/frame-weight-template.hbs
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use sp_std::marker::PhantomData;
+
+/// Weight functions needed for pallet_state_trie_migration.
+pub trait WeightInfo {
+	fn continue_migrate() -> Weight;
+	fn continue_migrate_wrong_witness() -> Weight;
+	fn migrate_custom_top_success() -> Weight;
+	fn migrate_custom_top_fail() -> Weight;
+	fn migrate_custom_child_success() -> Weight;
+	fn migrate_custom_child_fail() -> Weight;
+	fn process_top_key(v: u32, ) -> Weight;
+}
+
+/// Weights for pallet_state_trie_migration using the Substrate node and recommended hardware.
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	// Storage: StateTrieMigration MigrationProcess (r:1 w:1)
+	fn continue_migrate() -> Weight {
+		(13_385_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(1 as Weight))
+			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+	}
+	// Storage: StateTrieMigration MigrationProcess (r:1 w:0)
+	fn continue_migrate_wrong_witness() -> Weight {
+		(1_757_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(1 as Weight))
+	}
+	fn migrate_custom_top_success() -> Weight {
+		(12_813_000 as Weight)
+	}
+	// Storage: unknown [0x666f6f] (r:1 w:1)
+	fn migrate_custom_top_fail() -> Weight {
+		(24_961_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(1 as Weight))
+			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+	}
+	fn migrate_custom_child_success() -> Weight {
+		(13_132_000 as Weight)
+	}
+	// Storage: unknown [0x666f6f] (r:1 w:1)
+	fn migrate_custom_child_fail() -> Weight {
+		(29_215_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(1 as Weight))
+			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+	}
+	// Storage: unknown [0x6b6579] (r:1 w:1)
+	fn process_top_key(v: u32, ) -> Weight {
+		(0 as Weight)
+			// Standard Error: 0
+			.saturating_add((2_000 as Weight).saturating_mul(v as Weight))
+			.saturating_add(T::DbWeight::get().reads(1 as Weight))
+			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+	}
+}
+
+// For backwards compatibility and tests
+impl WeightInfo for () {
+	// Storage: StateTrieMigration MigrationProcess (r:1 w:1)
+	fn continue_migrate() -> Weight {
+		(13_385_000 as Weight)
+			.saturating_add(RocksDbWeight::get().reads(1 as Weight))
+			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+	}
+	// Storage: StateTrieMigration MigrationProcess (r:1 w:0)
+	fn continue_migrate_wrong_witness() -> Weight {
+		(1_757_000 as Weight)
+			.saturating_add(RocksDbWeight::get().reads(1 as Weight))
+	}
+	fn migrate_custom_top_success() -> Weight {
+		(12_813_000 as Weight)
+	}
+	// Storage: unknown [0x666f6f] (r:1 w:1)
+	fn migrate_custom_top_fail() -> Weight {
+		(24_961_000 as Weight)
+			.saturating_add(RocksDbWeight::get().reads(1 as Weight))
+			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+	}
+	fn migrate_custom_child_success() -> Weight {
+		(13_132_000 as Weight)
+	}
+	// Storage: unknown [0x666f6f] (r:1 w:1)
+	fn migrate_custom_child_fail() -> Weight {
+		(29_215_000 as Weight)
+			.saturating_add(RocksDbWeight::get().reads(1 as Weight))
+			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+	}
+	// Storage: unknown [0x6b6579] (r:1 w:1)
+	fn process_top_key(v: u32, ) -> Weight {
+		(0 as Weight)
+			// Standard Error: 0
+			.saturating_add((2_000 as Weight).saturating_mul(v as Weight))
+			.saturating_add(RocksDbWeight::get().reads(1 as Weight))
+			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+	}
+}
diff --git a/substrate/frame/support/src/traits/misc.rs b/substrate/frame/support/src/traits/misc.rs
index d2fd438d3a8..8c61874003b 100644
--- a/substrate/frame/support/src/traits/misc.rs
+++ b/substrate/frame/support/src/traits/misc.rs
@@ -38,18 +38,18 @@ macro_rules! defensive {
 		frame_support::log::error!(
 			target: "runtime",
 			"{}",
-			$crate::traits::misc::DEFENSIVE_OP_PUBLIC_ERROR
+			$crate::traits::DEFENSIVE_OP_PUBLIC_ERROR
 		);
-		debug_assert!(false, "{}", $crate::traits::misc::DEFENSIVE_OP_INTERNAL_ERROR);
+		debug_assert!(false, "{}", $crate::traits::DEFENSIVE_OP_INTERNAL_ERROR);
 	};
 	($error:tt) => {
 		frame_support::log::error!(
 			target: "runtime",
 			"{}: {:?}",
-			$crate::traits::misc::DEFENSIVE_OP_PUBLIC_ERROR,
+			$crate::traits::DEFENSIVE_OP_PUBLIC_ERROR,
 			$error
 		);
-		debug_assert!(false, "{}: {:?}", $crate::traits::misc::DEFENSIVE_OP_INTERNAL_ERROR, $error);
+		debug_assert!(false, "{}: {:?}", $crate::traits::DEFENSIVE_OP_INTERNAL_ERROR, $error);
 	}
 }
 
diff --git a/substrate/primitives/core/src/lib.rs b/substrate/primitives/core/src/lib.rs
index d21364d3f8f..b7c8b69e8a0 100644
--- a/substrate/primitives/core/src/lib.rs
+++ b/substrate/primitives/core/src/lib.rs
@@ -96,7 +96,7 @@ pub enum ExecutionContext {
 	/// We distinguish between major sync and import so that validators who are running
 	/// their initial sync (or catching up after some time offline) can use the faster
 	/// native runtime (since we can reasonably assume the network as a whole has already
-	/// come to a broad conensus on the block and it probably hasn't been crafted
+	/// come to a broad consensus on the block and it probably hasn't been crafted
 	/// specifically to attack this node), but when importing blocks at the head of the
 	/// chain in normal operation they can use the safer Wasm version.
 	Syncing,
diff --git a/substrate/primitives/state-machine/src/testing.rs b/substrate/primitives/state-machine/src/testing.rs
index 6be601aa72b..7f71d24b761 100644
--- a/substrate/primitives/state-machine/src/testing.rs
+++ b/substrate/primitives/state-machine/src/testing.rs
@@ -23,8 +23,8 @@ use std::{
 };
 
 use crate::{
-	backend::Backend, ext::Ext, InMemoryBackend, InMemoryProvingBackend, OverlayedChanges,
-	StorageKey, StorageTransactionCache, StorageValue,
+	backend::Backend, ext::Ext, InMemoryBackend, OverlayedChanges, StorageKey,
+	StorageTransactionCache, StorageValue,
 };
 
 use hash_db::Hasher;
@@ -100,7 +100,6 @@ where
 		state_version: StateVersion,
 	) -> Self {
 		assert!(storage.top.keys().all(|key| !is_child_storage_key(key)));
-		assert!(storage.children_default.keys().all(|key| is_child_storage_key(key)));
 
 		storage.top.insert(CODE.to_vec(), code.to_vec());
 
@@ -204,7 +203,7 @@ where
 	/// This implementation will wipe the proof recorded in between calls. Consecutive calls will
 	/// get their own proof from scratch.
 	pub fn execute_and_prove<R>(&mut self, execute: impl FnOnce() -> R) -> (R, StorageProof) {
-		let proving_backend = InMemoryProvingBackend::new(&self.backend);
+		let proving_backend = crate::InMemoryProvingBackend::new(&self.backend);
 		let mut proving_ext = Ext::new(
 			&mut self.overlay,
 			&mut self.storage_transaction_cache,
diff --git a/substrate/primitives/state-machine/src/trie_backend_essence.rs b/substrate/primitives/state-machine/src/trie_backend_essence.rs
index e33e50641bb..b0eb5438243 100644
--- a/substrate/primitives/state-machine/src/trie_backend_essence.rs
+++ b/substrate/primitives/state-machine/src/trie_backend_essence.rs
@@ -23,21 +23,23 @@ use codec::Encode;
 use hash_db::{self, AsHashDB, HashDB, HashDBRef, Hasher, Prefix};
 #[cfg(feature = "std")]
 use parking_lot::RwLock;
-use sp_core::storage::{ChildInfo, ChildType, StateVersion};
+use sp_core::storage::{ChildInfo, ChildType, PrefixedStorageKey, StateVersion};
 use sp_std::{boxed::Box, vec::Vec};
 use sp_trie::{
 	child_delta_trie_root, delta_trie_root, empty_child_trie_root, read_child_trie_value,
 	read_trie_value,
 	trie_types::{TrieDB, TrieError},
-	DBValue, KeySpacedDB, PrefixedMemoryDB, Trie, TrieDBIterator, TrieDBKeyIterator,
+	DBValue, KeySpacedDB, LayoutV1 as Layout, PrefixedMemoryDB, Trie, TrieDBIterator,
+	TrieDBKeyIterator,
 };
 #[cfg(feature = "std")]
 use std::collections::HashMap;
 #[cfg(feature = "std")]
 use std::sync::Arc;
-// In this module, we only use layout for read operation and empty root,
-// where V1 and V0 are equivalent.
-use sp_trie::LayoutV1 as Layout;
+use trie_db::{
+	node::{NodePlan, ValuePlan},
+	TrieDBNodeIterator,
+};
 
 #[cfg(not(feature = "std"))]
 macro_rules! format {
@@ -431,6 +433,72 @@ where
 		);
 	}
 
+	/// Check the number of remaining state items to migrate. Note this function should be removed
+	/// when the state migration is finished, as it is only a utility.
+	// original author: @cheme
+	pub fn check_migration_state(&self) -> Result<(u64, u64)> {
+		let threshold: u32 = sp_core::storage::TRIE_VALUE_NODE_THRESHOLD;
+		let mut nb_to_migrate = 0;
+		let mut nb_to_migrate_child = 0;
+
+		let trie = sp_trie::trie_types::TrieDB::new(self, &self.root)
+			.map_err(|e| format!("TrieDB creation error: {}", e))?;
+		let iter_node = TrieDBNodeIterator::new(&trie)
+			.map_err(|e| format!("TrieDB node iterator error: {}", e))?;
+		for node in iter_node {
+			let node = node.map_err(|e| format!("TrieDB node iterator error: {}", e))?;
+			match node.2.node_plan() {
+				NodePlan::Leaf { value, .. } |
+				NodePlan::NibbledBranch { value: Some(value), .. } =>
+					if let ValuePlan::Inline(range) = value {
+						if (range.end - range.start) as u32 >= threshold {
+							nb_to_migrate += 1;
+						}
+					},
+				_ => (),
+			}
+		}
+
+		let mut child_roots: Vec<(ChildInfo, Vec<u8>)> = Vec::new();
+		// get all child trie roots
+		for key_value in trie.iter().map_err(|e| format!("TrieDB node iterator error: {}", e))? {
+			let (key, value) =
+				key_value.map_err(|e| format!("TrieDB node iterator error: {}", e))?;
+			if key[..]
+				.starts_with(sp_core::storage::well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX)
+			{
+				let prefixed_key = PrefixedStorageKey::new(key);
+				let (_type, unprefixed) = ChildType::from_prefixed_key(&prefixed_key).unwrap();
+				child_roots.push((ChildInfo::new_default(unprefixed), value));
+			}
+		}
+		for (child_info, root) in child_roots {
+			let mut child_root = H::Out::default();
+			let storage = KeySpacedDB::new(self, child_info.keyspace());
+
+			child_root.as_mut()[..].copy_from_slice(&root[..]);
+			let trie = sp_trie::trie_types::TrieDB::new(&storage, &child_root)
+				.map_err(|e| format!("New child TrieDB error: {}", e))?;
+			let iter_node = TrieDBNodeIterator::new(&trie)
+				.map_err(|e| format!("TrieDB node iterator error: {}", e))?;
+			for node in iter_node {
+				let node = node.map_err(|e| format!("Child TrieDB node iterator error: {}", e))?;
+				match node.2.node_plan() {
+					NodePlan::Leaf { value, .. } |
+					NodePlan::NibbledBranch { value: Some(value), .. } =>
+						if let ValuePlan::Inline(range) = value {
+							if (range.end - range.start) as u32 >= threshold {
+								nb_to_migrate_child += 1;
+							}
+						},
+					_ => (),
+				}
+			}
+		}
+
+		Ok((nb_to_migrate, nb_to_migrate_child))
+	}
+
 	/// Returns all `(key, value)` pairs in the trie.
 	pub fn pairs(&self) -> Vec<(StorageKey, StorageValue)> {
 		let collect_all = || -> sp_std::result::Result<_, Box<TrieError<H::Out>>> {
diff --git a/substrate/primitives/storage/src/lib.rs b/substrate/primitives/storage/src/lib.rs
index 1e97454b260..d377ea931df 100644
--- a/substrate/primitives/storage/src/lib.rs
+++ b/substrate/primitives/storage/src/lib.rs
@@ -119,8 +119,7 @@ impl DerefMut for PrefixedStorageKey {
 }
 
 impl PrefixedStorageKey {
-	/// Create a prefixed storage key from its byte array
-	/// representation.
+	/// Create a prefixed storage key from its byte array representation.
 	pub fn new(inner: Vec<u8>) -> Self {
 		PrefixedStorageKey(inner)
 	}
@@ -130,9 +129,7 @@ impl PrefixedStorageKey {
 		PrefixedStorageKey::ref_cast(inner)
 	}
 
-	/// Get inner key, this should
-	/// only be needed when writing
-	/// into parent trie to avoid an
+	/// Get inner key, this should only be needed when writing into parent trie to avoid an
 	/// allocation.
 	pub fn into_inner(self) -> Vec<u8> {
 		self.0
@@ -171,10 +168,8 @@ pub struct StorageChild {
 pub struct Storage {
 	/// Top trie storage data.
 	pub top: StorageMap,
-	/// Children trie storage data.
-	/// The key does not including prefix, for the `default`
-	/// trie kind, so this is exclusively for the `ChildType::ParentKeyId`
-	/// tries.
+	/// Children trie storage data. Key does not include prefix, only for the `default` trie kind,
+	/// of `ChildType::ParentKeyId` type.
 	pub children_default: std::collections::HashMap<Vec<u8>, StorageChild>,
 }
 
diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs
index d6dfd1c59d4..533d65c49c2 100644
--- a/substrate/utils/frame/remote-externalities/src/lib.rs
+++ b/substrate/utils/frame/remote-externalities/src/lib.rs
@@ -40,7 +40,7 @@ use sp_core::{
 	},
 };
 pub use sp_io::TestExternalities;
-use sp_runtime::traits::Block as BlockT;
+use sp_runtime::{traits::Block as BlockT, StateVersion};
 use std::{
 	fs,
 	path::{Path, PathBuf},
@@ -56,6 +56,7 @@ type ChildKeyValues = Vec<(ChildInfo, Vec<KeyValue>)>;
 const LOG_TARGET: &str = "remote-ext";
 const DEFAULT_TARGET: &str = "wss://rpc.polkadot.io:443";
 const BATCH_SIZE: usize = 1000;
+const PAGE: u32 = 512;
 
 #[rpc(client)]
 pub trait RpcApi<Hash> {
@@ -117,12 +118,6 @@ pub struct OfflineConfig {
 	pub state_snapshot: SnapshotConfig,
 }
 
-impl<P: Into<PathBuf>> From<P> for SnapshotConfig {
-	fn from(p: P) -> Self {
-		Self { path: p.into() }
-	}
-}
-
 /// Description of the transport protocol (for online execution).
 #[derive(Debug, Clone)]
 pub enum Transport {
@@ -187,6 +182,8 @@ pub struct OnlineConfig<B: BlockT> {
 	pub pallets: Vec<String>,
 	/// Transport config.
 	pub transport: Transport,
+	/// Lookout for child-keys, and scrape them as well if set to true.
+	pub scrape_children: bool,
 }
 
 impl<B: BlockT> OnlineConfig<B> {
@@ -205,10 +202,17 @@ impl<B: BlockT> Default for OnlineConfig<B> {
 			at: None,
 			state_snapshot: None,
 			pallets: vec![],
+			scrape_children: true,
 		}
 	}
 }
 
+impl<B: BlockT> From<String> for OnlineConfig<B> {
+	fn from(s: String) -> Self {
+		Self { transport: s.into(), ..Default::default() }
+	}
+}
+
 /// Configuration of the state snapshot.
 #[derive(Clone)]
 pub struct SnapshotConfig {
@@ -222,6 +226,12 @@ impl SnapshotConfig {
 	}
 }
 
+impl From<String> for SnapshotConfig {
+	fn from(s: String) -> Self {
+		Self::new(s)
+	}
+}
+
 impl Default for SnapshotConfig {
 	fn default() -> Self {
 		Self { path: Path::new("SNAPSHOT").into() }
@@ -242,6 +252,8 @@ pub struct Builder<B: BlockT> {
 	hashed_blacklist: Vec<Vec<u8>>,
 	/// connectivity mode, online or offline.
 	mode: Mode<B>,
+	/// The state version being used.
+	state_version: StateVersion,
 }
 
 // NOTE: ideally we would use `DefaultNoBound` here, but not worth bringing in frame-support for
@@ -254,6 +266,7 @@ impl<B: BlockT + DeserializeOwned> Default for Builder<B> {
 			hashed_prefixes: Default::default(),
 			hashed_keys: Default::default(),
 			hashed_blacklist: Default::default(),
+			state_version: StateVersion::V1,
 		}
 	}
 }
@@ -306,7 +319,6 @@ impl<B: BlockT + DeserializeOwned> Builder<B> {
 		prefix: StorageKey,
 		at: B::Hash,
 	) -> Result<Vec<StorageKey>, &'static str> {
-		const PAGE: u32 = 512;
 		let mut last_key: Option<StorageKey> = None;
 		let mut all_keys: Vec<StorageKey> = vec![];
 		let keys = loop {
@@ -320,6 +332,7 @@ impl<B: BlockT + DeserializeOwned> Builder<B> {
 					"rpc get_keys failed"
 				})?;
 			let page_len = page.len();
+
 			all_keys.extend(page);
 
 			if page_len < PAGE as usize {
@@ -362,11 +375,12 @@ impl<B: BlockT + DeserializeOwned> Builder<B> {
 				.cloned()
 				.map(|key| ("state_getStorage", rpc_params![key, at]))
 				.collect::<Vec<_>>();
+
 			let values = client.batch_request::<Option<StorageData>>(batch).await.map_err(|e| {
 				log::error!(
 					target: LOG_TARGET,
 					"failed to execute batch: {:?}. Error: {:?}",
-					chunk_keys,
+					chunk_keys.iter().map(|k| HexDisplay::from(k)).collect::<Vec<_>>(),
 					e
 				);
 				"batch failed."
@@ -693,7 +707,7 @@ impl<B: BlockT + DeserializeOwned> Builder<B> {
 
 		// inject manual key values.
 		if !self.hashed_key_values.is_empty() {
-			log::debug!(
+			log::info!(
 				target: LOG_TARGET,
 				"extending externalities with {} manually injected key-values",
 				self.hashed_key_values.len()
@@ -703,7 +717,7 @@ impl<B: BlockT + DeserializeOwned> Builder<B> {
 
 		// exclude manual key values.
 		if !self.hashed_blacklist.is_empty() {
-			log::debug!(
+			log::info!(
 				target: LOG_TARGET,
 				"excluding externalities from {} keys",
 				self.hashed_blacklist.len()
@@ -795,6 +809,12 @@ impl<B: BlockT + DeserializeOwned> Builder<B> {
 		self
 	}
 
+	/// The state version to use.
+	pub fn state_version(mut self, version: StateVersion) -> Self {
+		self.state_version = version;
+		self
+	}
+
 	/// overwrite the `at` value, if `mode` is set to [`Mode::Online`].
 	///
 	/// noop if `mode` is [`Mode::Offline`]
@@ -808,8 +828,13 @@ impl<B: BlockT + DeserializeOwned> Builder<B> {
 
 	/// Build the test externalities.
 	pub async fn build(self) -> Result<TestExternalities, &'static str> {
+		let state_version = self.state_version;
 		let (top_kv, child_kv) = self.pre_build().await?;
-		let mut ext = TestExternalities::new_with_code(Default::default(), Default::default());
+		let mut ext = TestExternalities::new_with_code_and_state(
+			Default::default(),
+			Default::default(),
+			state_version,
+		);
 
 		info!(target: LOG_TARGET, "injecting a total of {} top keys", top_kv.len());
 		for (k, v) in top_kv {
@@ -1165,4 +1190,21 @@ mod remote_tests {
 			std::fs::remove_file(d.path()).unwrap();
 		}
 	}
+
+	#[tokio::test]
+	async fn can_build_child_tree() {
+		init_logger();
+		Builder::<Block>::new()
+			.mode(Mode::Online(OnlineConfig {
+				// transport: "wss://kusama-rpc.polkadot.io".to_owned().into(),
+				transport: "ws://kianenigma-archive:9924".to_owned().into(),
+				// transport: "ws://localhost:9999".to_owned().into(),
+				pallets: vec!["Crowdloan".to_owned()],
+				..Default::default()
+			}))
+			.build()
+			.await
+			.expect(REMOTE_INACCESSIBLE)
+			.execute_with(|| {});
+	}
 }
diff --git a/substrate/utils/frame/try-runtime/cli/src/lib.rs b/substrate/utils/frame/try-runtime/cli/src/lib.rs
index ae7a1c3ae87..92721228c92 100644
--- a/substrate/utils/frame/try-runtime/cli/src/lib.rs
+++ b/substrate/utils/frame/try-runtime/cli/src/lib.rs
@@ -493,6 +493,7 @@ impl State {
 						transport: uri.to_owned().into(),
 						state_snapshot: snapshot_path.as_ref().map(SnapshotConfig::new),
 						pallets: pallets.clone().unwrap_or_default(),
+						scrape_children: true,
 						at,
 					}))
 					.inject_hashed_key(
-- 
GitLab