// NOTE(review): removed non-code artifacts here (blame-gutter "Newer"/"Older" labels and
// bare line numbers 1001-1025 from a web scrape) that had corrupted the file.
let bytes = fs::read(path).map_err(|_| "fs::read failed.")?;
Decode::decode(&mut &*bytes).map_err(|_| "decode failed")
}
/// Build the externalities from a remote node: connect, resolve the target block,
/// then fetch (and possibly cache) the state under that block.
async fn do_load_remote(&mut self) -> Result<RemoteExternalities<B>, &'static str> {
    // A client must exist before we can ask the node anything.
    self.init_remote_client().await?;
    // Resolve the block we are pinned to *before* downloading, so the hash and the
    // state are guaranteed to refer to the same block.
    let at = self.as_online().at_expected();
    let state = self.load_remote_and_maybe_save().await?;
    Ok(RemoteExternalities { block_hash: at, inner_ext: state })
}
/// Build the externalities purely from a snapshot file on disk — no network access.
///
/// The snapshot's recorded state version is used unless the builder was configured
/// with an explicit override.
fn do_load_offline(
    &mut self,
    config: OfflineConfig,
) -> Result<RemoteExternalities<B>, &'static str> {
    let Snapshot { block_hash, top, child, state_version } =
        self.load_snapshot(config.state_snapshot.path.clone())?;

    let effective_version = self.overwrite_state_version.unwrap_or(state_version);
    let mut inner_ext = TestExternalities::new_with_code_and_state(
        Default::default(),
        Default::default(),
        effective_version,
    );

    // Note: the logged count is taken before child-storage keys are filtered out.
    info!(target: LOG_TARGET, "injecting a total of {} top keys", top.len());
    let top_kv: Vec<_> = top
        .into_iter()
        // Child-trie roots live in the top trie under a reserved prefix; they are
        // re-created by `insert_child` below, so skip them here.
        .filter(|(key, _)| !is_default_child_storage_key(key.as_ref()))
        .map(|(key, value)| (key.0, value.0))
        .collect();
    inner_ext.batch_insert(top_kv);

    info!(
        target: LOG_TARGET,
        "injecting a total of {} child keys",
        child.iter().map(|(_, kv)| kv.len()).sum::<usize>()
    );
    for (child_info, entries) in child {
        for (key, value) in entries {
            inner_ext.insert_child(child_info.clone(), key.0, value.0);
        }
    }

    Ok(RemoteExternalities { inner_ext, block_hash })
}
pub(crate) async fn pre_build(mut self) -> Result<RemoteExternalities<B>, &'static str> {
let mut ext = match self.mode.clone() {
Mode::Offline(config) => self.do_load_offline(config)?,
Mode::Online(_) => self.do_load_remote().await?,
Mode::OfflineOrElseOnline(offline_config, _) => {
match self.do_load_offline(offline_config) {
Ok(x) => x,
Err(_) => self.do_load_remote().await?,
// inject manual key values.
if !self.hashed_key_values.is_empty() {
target: LOG_TARGET,
"extending externalities with {} manually injected key-values",
self.hashed_key_values.len()
);
ext.batch_insert(self.hashed_key_values.into_iter().map(|(k, v)| (k.0, v.0)));
}
// exclude manual key values.
if !self.hashed_blacklist.is_empty() {
target: LOG_TARGET,
"excluding externalities from {} keys",
self.hashed_blacklist.len()
);
for k in self.hashed_blacklist {
ext.execute_with(|| sp_io::storage::clear(&k));
}
}
}
// Public methods
impl<B: BlockT + DeserializeOwned> Builder<B>
where
B::Hash: DeserializeOwned,
B::Header: DeserializeOwned,
{
/// Create a new builder.
pub fn new() -> Self {
Default::default()
}
/// Inject a manual list of key and values to the storage.
pub fn inject_hashed_key_value(mut self, injections: Vec<KeyValue>) -> Self {
self.hashed_key_values.push(i.clone());
/// Blacklist this hashed key from the final externalities. This is treated as-is, and should be
/// pre-hashed.
pub fn blacklist_hashed_key(mut self, hashed: &[u8]) -> Self {
self.hashed_blacklist.push(hashed.to_vec());
self
}
/// Configure a state snapshot to be used.
pub fn mode(mut self, mode: Mode<B>) -> Self {
self.mode = mode;
self
}
pub fn overwrite_state_version(mut self, version: StateVersion) -> Self {
self.overwrite_state_version = Some(version);
Kian Paimani
committed
self
}
pub async fn build(self) -> Result<RemoteExternalities<B>, &'static str> {
let mut ext = self.pre_build().await?;
ext.commit_all().unwrap();
info!(
target: LOG_TARGET,
"initialized state externalities with storage root {:?} and state_version {:?}",
ext.as_backend().root(),
ext.state_version
);
}
}
// NOTE(review): the scraped source lost the module wrapper and the `init_logger`
// function's header/tail around these lines; reconstructed as a test-prelude module.
#[cfg(test)]
pub(crate) mod test_prelude {
    use tracing_subscriber::EnvFilter;

    pub(crate) use super::*;
    pub(crate) use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, H256 as Hash};

    /// Block type used by every test in this file.
    pub(crate) type Block = RawBlock<ExtrinsicWrapper<Hash>>;

    /// Install a tracing subscriber driven by `RUST_LOG`; ignore the error if one is
    /// already installed (tests may run in the same process).
    pub(crate) fn init_logger() {
        let _ = tracing_subscriber::fmt()
            .with_env_filter(EnvFilter::from_default_env())
            .with_level(true)
            .try_init();
    }
}
#[cfg(test)]
mod tests {
    use super::test_prelude::*;

    /// A snapshot file checked into `test_data` can be loaded into externalities.
    // NOTE(review): the scraped source truncated this test after the config; the
    // `.build().await.unwrap().execute_with` tail is reconstructed.
    #[tokio::test(flavor = "multi_thread")]
    async fn can_load_state_snapshot() {
        init_logger();
        Builder::<Block>::new()
            .mode(Mode::Offline(OfflineConfig {
                state_snapshot: SnapshotConfig::new("test_data/proxy_test"),
            }))
            .build()
            .await
            .unwrap()
            .execute_with(|| {});
    }

    /// A blacklisted key present in the snapshot must be absent from the built state.
    #[tokio::test(flavor = "multi_thread")]
    async fn can_exclude_from_snapshot() {
        init_logger();

        // get the first key from the snapshot file.
        let some_key = Builder::<Block>::new()
            .mode(Mode::Offline(OfflineConfig {
                state_snapshot: SnapshotConfig::new("test_data/proxy_test"),
            }))
            .build()
            .await
            .expect("Can't read state snapshot file")
            .execute_with(|| {
                let key =
                    sp_io::storage::next_key(&[]).expect("some key must exist in the snapshot");
                assert!(sp_io::storage::get(&key).is_some());
                key
            });

        // Rebuild with that key blacklisted: it must no longer be readable.
        Builder::<Block>::new()
            .mode(Mode::Offline(OfflineConfig {
                state_snapshot: SnapshotConfig::new("test_data/proxy_test"),
            }))
            .blacklist_hashed_key(&some_key)
            .build()
            .await
            .expect("Can't read state snapshot file")
            .execute_with(|| assert!(sp_io::storage::get(&some_key).is_none()));
    }
}
#[cfg(all(test, feature = "remote-test"))]
mod remote_tests {
use super::test_prelude::*;
// NOTE(review): removed non-code artifacts here (bare blame-gutter line numbers
// 1206-1264 from a web scrape) that had corrupted the file.
use std::os::unix::fs::MetadataExt;
/// The state version recorded in a snapshot survives a round-trip to disk, and
/// `overwrite_state_version` can force the opposite version on reload.
#[tokio::test(flavor = "multi_thread")]
async fn state_version_is_kept_and_can_be_altered() {
    const CACHE: &'static str = "state_version_is_kept_and_can_be_altered";
    init_logger();

    // Build once against the live node, writing a snapshot file as a side effect.
    let live = Builder::<Block>::new()
        .mode(Mode::Online(OnlineConfig {
            pallets: vec!["Proxy".to_owned()],
            child_trie: false,
            state_snapshot: Some(SnapshotConfig::new(CACHE)),
            ..Default::default()
        }))
        .build()
        .await
        .unwrap();

    // Reload purely from that snapshot: the recorded version must round-trip.
    let from_disk = Builder::<Block>::new()
        .mode(Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new(CACHE) }))
        .build()
        .await
        .unwrap();
    assert_eq!(live.state_version, from_disk.state_version);

    // Reload once more, this time forcing the opposite version.
    let flipped = match live.state_version {
        StateVersion::V0 => StateVersion::V1,
        StateVersion::V1 => StateVersion::V0,
    };
    let overridden = Builder::<Block>::new()
        .mode(Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new(CACHE) }))
        .overwrite_state_version(flipped)
        .build()
        .await
        .unwrap();
    assert_eq!(overridden.state_version, flipped);
}
/// The block hash pinned during an online build is persisted in the snapshot and
/// comes back identical when the snapshot is loaded offline.
#[tokio::test(flavor = "multi_thread")]
async fn snapshot_block_hash_works() {
    const CACHE: &'static str = "snapshot_block_hash_works";
    init_logger();

    // Build once against the live node, writing a snapshot file as a side effect.
    let live = Builder::<Block>::new()
        .mode(Mode::Online(OnlineConfig {
            pallets: vec!["Proxy".to_owned()],
            child_trie: false,
            state_snapshot: Some(SnapshotConfig::new(CACHE)),
            ..Default::default()
        }))
        .build()
        .await
        .unwrap();

    // Build again purely from that snapshot file.
    let from_disk = Builder::<Block>::new()
        .mode(Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new(CACHE) }))
        .build()
        .await
        .unwrap();

    assert_eq!(live.block_hash, from_disk.block_hash);
}
#[tokio::test(flavor = "multi_thread")]
const CACHE: &'static str = "offline_else_online_works_data";
// this shows that in the second run, we use the remote and create a snapshot.
Builder::<Block>::new()
.mode(Mode::OfflineOrElseOnline(
OfflineConfig { state_snapshot: SnapshotConfig::new(CACHE) },
OnlineConfig {
pallets: vec!["Proxy".to_owned()],
child_trie: false,
state_snapshot: Some(SnapshotConfig::new(CACHE)),
..Default::default()
},
))
.build()
.await
.execute_with(|| {});
// this shows that in the second run, we are not using the remote
Builder::<Block>::new()
.mode(Mode::OfflineOrElseOnline(
OfflineConfig { state_snapshot: SnapshotConfig::new(CACHE) },
OnlineConfig {
transport: "ws://non-existent:666".to_owned().into(),
..Default::default()
},
))
.build()
.await
let to_delete = std::fs::read_dir(Path::new("."))
.unwrap()
.into_iter()
.map(|d| d.unwrap())
.filter(|p| p.path().file_name().unwrap_or_default() == CACHE)
.collect::<Vec<_>>();
assert!(to_delete.len() == 1);
std::fs::remove_file(to_delete[0].path()).unwrap();
}
#[tokio::test(flavor = "multi_thread")]
async fn can_build_one_small_pallet() {
init_logger();
Builder::<Block>::new()
.mode(Mode::Online(OnlineConfig {
pallets: vec!["Proxy".to_owned()],
child_trie: false,
.execute_with(|| {});
}
/// Scraping several pallets at once from the remote node succeeds.
#[tokio::test(flavor = "multi_thread")]
async fn can_build_few_pallet() {
    init_logger();
    Builder::<Block>::new()
        .mode(Mode::Online(OnlineConfig {
            pallets: vec!["Proxy".to_owned(), "Multisig".to_owned()],
            ..Default::default()
        }))
        .build()
        .await
        // Fix: `build` returns a `Result`; the missing `.unwrap()` made the
        // following `.execute_with` call on the `Result` a compile error.
        .unwrap()
        .execute_with(|| {});
}
#[tokio::test(flavor = "multi_thread")]
async fn can_create_snapshot() {
const CACHE: &'static str = "can_create_snapshot";
init_logger();
Builder::<Block>::new()
.mode(Mode::Online(OnlineConfig {
state_snapshot: Some(SnapshotConfig::new(CACHE)),
pallets: vec!["Proxy".to_owned()],
..Default::default()
}))
.build()
.await
.execute_with(|| {});
let to_delete = std::fs::read_dir(Path::new("."))
.unwrap()
.into_iter()
.map(|d| d.unwrap())
.filter(|p| p.path().file_name().unwrap_or_default() == CACHE)
.collect::<Vec<_>>();
let snap: Snapshot<Block> = Builder::<Block>::new().load_snapshot(CACHE.into()).unwrap();
assert!(matches!(snap, Snapshot { top, child, .. } if top.len() > 0 && child.len() == 0));
assert!(to_delete.len() == 1);
let to_delete = to_delete.first().unwrap();
assert!(std::fs::metadata(to_delete.path()).unwrap().size() > 1);
std::fs::remove_file(to_delete.path()).unwrap();
#[tokio::test(flavor = "multi_thread")]
async fn can_create_child_snapshot() {
const CACHE: &'static str = "can_create_child_snapshot";
.mode(Mode::Online(OnlineConfig {
state_snapshot: Some(SnapshotConfig::new(CACHE)),
pallets: vec!["Crowdloan".to_owned()],
..Default::default()
}))
.build()
.await
let to_delete = std::fs::read_dir(Path::new("."))
.unwrap()
.into_iter()
.map(|d| d.unwrap())
.filter(|p| p.path().file_name().unwrap_or_default() == CACHE)
let snap: Snapshot<Block> = Builder::<Block>::new().load_snapshot(CACHE.into()).unwrap();
assert!(matches!(snap, Snapshot { top, child, .. } if top.len() > 0 && child.len() > 0));
assert!(to_delete.len() == 1);
let to_delete = to_delete.first().unwrap();
assert!(std::fs::metadata(to_delete.path()).unwrap().size() > 1);
std::fs::remove_file(to_delete.path()).unwrap();
#[tokio::test(flavor = "multi_thread")]
async fn can_build_big_pallet() {
if std::option_env!("TEST_WS").is_none() {
return
}
.mode(Mode::Online(OnlineConfig {
transport: std::option_env!("TEST_WS").unwrap().to_owned().into(),
pallets: vec!["Staking".to_owned()],
child_trie: false,
..Default::default()
}))
#[tokio::test(flavor = "multi_thread")]
async fn can_fetch_all() {
if std::option_env!("TEST_WS").is_none() {
return
}
init_logger();
Builder::<Block>::new()
.mode(Mode::Online(OnlineConfig {
transport: std::option_env!("TEST_WS").unwrap().to_owned().into(),
..Default::default()
}))
.build()
.await