// NOTE(review): removed stray git-blame UI artifacts ("Newer"/"Older") that were pasted
// into the file and are not valid Rust. The lines below are the tail of a function whose
// signature is above this chunk.
let online_config = self.as_online_mut();
online_config.pallets.iter().for_each(|p| {
online_config
.hashed_prefixes
.push(sp_crypto_hashing::twox_128(p.as_bytes()).to_vec())
});
if online_config.child_trie {
online_config.hashed_prefixes.push(DEFAULT_CHILD_STORAGE_KEY_PREFIX.to_vec());
}
// Finally, if by now, we have put any limitations on prefixes that we are interested in, we
// download everything.
if online_config
.hashed_prefixes
.iter()
.filter(|p| *p != DEFAULT_CHILD_STORAGE_KEY_PREFIX)
.count() == 0
{
log::info!(
target: LOG_TARGET,
"since no prefix is filtered, the data for all pallets will be downloaded"
);
online_config.hashed_prefixes.push(vec![]);
}
/// Load the data from a remote server. The main code path is calling into `load_top_remote` and
/// `load_child_remote`.
///
/// Must be called after `init_remote_client`.
async fn load_remote_and_maybe_save(
&mut self,
) -> Result<TestExternalities<HashingFor<B>>, &'static str> {
let state_version =
StateApi::<B::Hash>::runtime_version(self.as_online().rpc_client(), None)
.await
.map_err(|e| {
error!(target: LOG_TARGET, "Error = {:?}", e);
"rpc runtime_version failed."
})
.map(|v| v.state_version())?;
let mut pending_ext = TestExternalities::new_with_code_and_state(
Default::default(),
Default::default(),
self.overwrite_state_version.unwrap_or(state_version),
);
Liam Aharon
committed
// Load data from the remote into `pending_ext`.
let top_kv = self.load_top_remote(&mut pending_ext).await?;
Liam Aharon
committed
self.load_child_remote(&top_kv, &mut pending_ext).await?;
Liam Aharon
committed
// If we need to save a snapshot, save the raw storage and root hash to the snapshot.
if let Some(path) = self.as_online().state_snapshot.clone().map(|c| c.path) {
Liam Aharon
committed
let (raw_storage, storage_root) = pending_ext.into_raw_snapshot();
Liam Aharon
committed
let snapshot = Snapshot::<B>::new(
Liam Aharon
committed
self.as_online()
.at
.expect("set to `Some` in `init_remote_client`; must be called before; qed"),
Liam Aharon
committed
raw_storage.clone(),
Liam Aharon
committed
storage_root,
Liam Aharon
committed
);
let encoded = snapshot.encode();
log::info!(
target: LOG_TARGET,
"writing snapshot of {} bytes to {:?}",
encoded.len(),
path
);
std::fs::write(path, encoded).map_err(|_| "fs::write failed")?;
Liam Aharon
committed
// pending_ext was consumed when creating the snapshot, need to reinitailize it
return Ok(TestExternalities::from_raw_snapshot(
raw_storage,
storage_root,
Liam Aharon
committed
self.overwrite_state_version.unwrap_or(state_version),
}
Ok(pending_ext)
}
/// Connect to the remote node, scrape the state (saving a snapshot when configured), and wrap
/// the result together with the block hash it was taken at.
async fn do_load_remote(&mut self) -> Result<RemoteExternalities<B>, &'static str> {
    self.init_remote_client().await?;
    // `at` is guaranteed to be set by `init_remote_client` above.
    let at = self.as_online().at_expected();
    let externalities = self.load_remote_and_maybe_save().await?;
    Ok(RemoteExternalities { block_hash: at, inner_ext: externalities })
}
fn do_load_offline(
&mut self,
config: OfflineConfig,
) -> Result<RemoteExternalities<B>, &'static str> {
Liam Aharon
committed
let mut sp = Spinner::with_timer(Spinners::Dots, "Loading snapshot...".into());
let start = Instant::now();
Liam Aharon
committed
info!(target: LOG_TARGET, "Loading snapshot from {:?}", &config.state_snapshot.path);
let Snapshot { snapshot_version: _, block_hash, state_version, raw_storage, storage_root } =
Snapshot::<B>::load(&config.state_snapshot.path)?;
let inner_ext = TestExternalities::from_raw_snapshot(
raw_storage,
storage_root,
self.overwrite_state_version.unwrap_or(state_version),
);
Liam Aharon
committed
sp.stop_with_message(format!("✅ Loaded snapshot ({:.2}s)", start.elapsed().as_secs_f32()));
Ok(RemoteExternalities { inner_ext, block_hash })
}
pub(crate) async fn pre_build(mut self) -> Result<RemoteExternalities<B>, &'static str> {
let mut ext = match self.mode.clone() {
Mode::Offline(config) => self.do_load_offline(config)?,
Mode::Online(_) => self.do_load_remote().await?,
Mode::OfflineOrElseOnline(offline_config, _) => {
match self.do_load_offline(offline_config) {
Ok(x) => x,
Err(_) => self.do_load_remote().await?,
// inject manual key values.
if !self.hashed_key_values.is_empty() {
target: LOG_TARGET,
"extending externalities with {} manually injected key-values",
self.hashed_key_values.len()
);
ext.batch_insert(self.hashed_key_values.into_iter().map(|(k, v)| (k.0, v.0)));
}
// exclude manual key values.
if !self.hashed_blacklist.is_empty() {
target: LOG_TARGET,
"excluding externalities from {} keys",
self.hashed_blacklist.len()
);
for k in self.hashed_blacklist {
ext.execute_with(|| sp_io::storage::clear(&k));
}
}
}
// Public methods
impl<B: BlockT + DeserializeOwned> Builder<B>
where
B::Hash: DeserializeOwned,
B::Header: DeserializeOwned,
{
/// Create a new builder.
pub fn new() -> Self {
Default::default()
}
/// Inject a manual list of key and values to the storage.
pub fn inject_hashed_key_value(mut self, injections: Vec<KeyValue>) -> Self {
self.hashed_key_values.push(i.clone());
/// Blacklist this hashed key from the final externalities. This is treated as-is, and should be
/// pre-hashed.
pub fn blacklist_hashed_key(mut self, hashed: &[u8]) -> Self {
self.hashed_blacklist.push(hashed.to_vec());
self
}
/// Configure a state snapshot to be used.
pub fn mode(mut self, mode: Mode<B>) -> Self {
self.mode = mode;
self
}
pub fn overwrite_state_version(mut self, version: StateVersion) -> Self {
self.overwrite_state_version = Some(version);
Kian Paimani
committed
self
}
pub async fn build(self) -> Result<RemoteExternalities<B>, &'static str> {
let mut ext = self.pre_build().await?;
ext.commit_all().unwrap();
info!(
target: LOG_TARGET,
"initialized state externalities with storage root {:?} and state_version {:?}",
ext.as_backend().root(),
ext.state_version
);
}
}
#[cfg(test)]
mod test_prelude {
    pub(crate) use super::*;
    pub(crate) use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, H256 as Hash};

    // A minimal test block type used by all test modules below.
    pub(crate) type Block = RawBlock<ExtrinsicWrapper<Hash>>;

    /// Initialize tracing for tests; `try_init_simple` makes repeated calls harmless.
    pub(crate) fn init_logger() {
        sp_tracing::try_init_simple();
    }
}
#[cfg(test)]
mod tests {
    use super::test_prelude::*;

    #[tokio::test]
    async fn can_load_state_snapshot() {
        init_logger();
        Builder::<Block>::new()
            .mode(Mode::Offline(OfflineConfig {
                state_snapshot: SnapshotConfig::new("test_data/proxy_test"),
            }))
            .build()
            .await
            .unwrap()
            .execute_with(|| {});
    }

    #[tokio::test]
    async fn can_exclude_from_snapshot() {
        init_logger();

        // get the first key from the snapshot file.
        let some_key = Builder::<Block>::new()
            .mode(Mode::Offline(OfflineConfig {
                state_snapshot: SnapshotConfig::new("test_data/proxy_test"),
            }))
            .build()
            .await
            .expect("Can't read state snapshot file")
            .execute_with(|| {
                let key =
                    sp_io::storage::next_key(&[]).expect("some key must exist in the snapshot");
                assert!(sp_io::storage::get(&key).is_some());
                key
            });

        // Rebuild with that key blacklisted: it must be absent from the externalities.
        Builder::<Block>::new()
            .mode(Mode::Offline(OfflineConfig {
                state_snapshot: SnapshotConfig::new("test_data/proxy_test"),
            }))
            .blacklist_hashed_key(&some_key)
            .build()
            .await
            .expect("Can't read state snapshot file")
            .execute_with(|| assert!(sp_io::storage::get(&some_key).is_none()));
    }
}
#[cfg(all(test, feature = "remote-test"))]
mod remote_tests {
use super::test_prelude::*;
use std::{env, os::unix::fs::MetadataExt};
/// The endpoint to test against: the `TEST_WS` env var when set, else the built-in default.
fn endpoint() -> String {
    match env::var("TEST_WS") {
        Ok(ws) => ws,
        Err(_) => DEFAULT_HTTP_ENDPOINT.to_string(),
    }
}
Liam Aharon
committed
#[tokio::test]
async fn state_version_is_kept_and_can_be_altered() {
const CACHE: &'static str = "state_version_is_kept_and_can_be_altered";
init_logger();
// first, build a snapshot.
let ext = Builder::<Block>::new()
.mode(Mode::Online(OnlineConfig {
transport: endpoint().clone().into(),
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
pallets: vec!["Proxy".to_owned()],
child_trie: false,
state_snapshot: Some(SnapshotConfig::new(CACHE)),
..Default::default()
}))
.build()
.await
.unwrap();
// now re-create the same snapshot.
let cached_ext = Builder::<Block>::new()
.mode(Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new(CACHE) }))
.build()
.await
.unwrap();
assert_eq!(ext.state_version, cached_ext.state_version);
// now overwrite it
let other = match ext.state_version {
StateVersion::V0 => StateVersion::V1,
StateVersion::V1 => StateVersion::V0,
};
let cached_ext = Builder::<Block>::new()
.mode(Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new(CACHE) }))
.overwrite_state_version(other)
.build()
.await
.unwrap();
assert_eq!(cached_ext.state_version, other);
}
Liam Aharon
committed
#[tokio::test]
async fn snapshot_block_hash_works() {
const CACHE: &'static str = "snapshot_block_hash_works";
init_logger();
// first, build a snapshot.
let ext = Builder::<Block>::new()
.mode(Mode::Online(OnlineConfig {
transport: endpoint().clone().into(),
pallets: vec!["Proxy".to_owned()],
child_trie: false,
state_snapshot: Some(SnapshotConfig::new(CACHE)),
..Default::default()
}))
.build()
.await
.unwrap();
// now re-create the same snapshot.
let cached_ext = Builder::<Block>::new()
.mode(Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new(CACHE) }))
.build()
.await
.unwrap();
assert_eq!(ext.block_hash, cached_ext.block_hash);
}
/// Scraping with `child_trie: true` must yield strictly more keys than scraping the same
/// pallet with `child_trie: false`.
#[tokio::test]
async fn child_keys_are_loaded() {
    const CACHE: &'static str = "snapshot_retains_storage";
    init_logger();

    // Build once with child-trie scraping enabled...
    let ext_with_children = Builder::<Block>::new()
        .mode(Mode::Online(OnlineConfig {
            transport: endpoint().clone().into(),
            pallets: vec!["Proxy".to_owned()],
            child_trie: true,
            state_snapshot: Some(SnapshotConfig::new(CACHE)),
            ..Default::default()
        }))
        .build()
        .await
        .unwrap();

    // ...and once with it disabled.
    let ext_without_children = Builder::<Block>::new()
        .mode(Mode::Online(OnlineConfig {
            transport: endpoint().clone().into(),
            pallets: vec!["Proxy".to_owned()],
            child_trie: false,
            state_snapshot: Some(SnapshotConfig::new(CACHE)),
            ..Default::default()
        }))
        .build()
        .await
        .unwrap();

    // The child-trie variant must contain strictly more keys.
    let with_children = ext_with_children.as_backend().backend_storage().keys().len();
    let without_children = ext_without_children.as_backend().backend_storage().keys().len();
    assert!(with_children > without_children);
}
Liam Aharon
committed
#[tokio::test]
const CACHE: &'static str = "offline_else_online_works_data";
// this shows that in the second run, we use the remote and create a snapshot.
Builder::<Block>::new()
.mode(Mode::OfflineOrElseOnline(
OfflineConfig { state_snapshot: SnapshotConfig::new(CACHE) },
transport: endpoint().clone().into(),
child_trie: false,
state_snapshot: Some(SnapshotConfig::new(CACHE)),
..Default::default()
},
))
.build()
.await
.execute_with(|| {});
// this shows that in the second run, we are not using the remote
Builder::<Block>::new()
.mode(Mode::OfflineOrElseOnline(
OfflineConfig { state_snapshot: SnapshotConfig::new(CACHE) },
OnlineConfig {
transport: "ws://non-existent:666".to_owned().into(),
..Default::default()
},
))
.build()
.await
let to_delete = std::fs::read_dir(Path::new("."))
.unwrap()
.into_iter()
.map(|d| d.unwrap())
.filter(|p| p.path().file_name().unwrap_or_default() == CACHE)
.collect::<Vec<_>>();
assert!(to_delete.len() == 1);
std::fs::remove_file(to_delete[0].path()).unwrap();
}
Liam Aharon
committed
#[tokio::test]
async fn can_build_one_small_pallet() {
init_logger();
Builder::<Block>::new()
.mode(Mode::Online(OnlineConfig {
transport: endpoint().clone().into(),
pallets: vec!["Proxy".to_owned()],
child_trie: false,
.execute_with(|| {});
}
Liam Aharon
committed
#[tokio::test]
async fn can_build_few_pallet() {
init_logger();
Builder::<Block>::new()
.mode(Mode::Online(OnlineConfig {
transport: endpoint().clone().into(),
pallets: vec!["Proxy".to_owned(), "Multisig".to_owned()],
..Default::default()
}))
.build()
.await
.execute_with(|| {});
}
#[tokio::test(flavor = "multi_thread")]
async fn can_create_snapshot() {
const CACHE: &'static str = "can_create_snapshot";
init_logger();
Builder::<Block>::new()
.mode(Mode::Online(OnlineConfig {
transport: endpoint().clone().into(),
state_snapshot: Some(SnapshotConfig::new(CACHE)),
pallets: vec!["Proxy".to_owned()],
..Default::default()
}))
.build()
.await
.execute_with(|| {});
let to_delete = std::fs::read_dir(Path::new("."))
.unwrap()
.into_iter()
.map(|d| d.unwrap())
.filter(|p| p.path().file_name().unwrap_or_default() == CACHE)
.collect::<Vec<_>>();
assert!(to_delete.len() == 1);
let to_delete = to_delete.first().unwrap();
assert!(std::fs::metadata(to_delete.path()).unwrap().size() > 1);
std::fs::remove_file(to_delete.path()).unwrap();
Liam Aharon
committed
#[tokio::test]
async fn can_create_child_snapshot() {
const CACHE: &'static str = "can_create_child_snapshot";
.mode(Mode::Online(OnlineConfig {
transport: endpoint().clone().into(),
state_snapshot: Some(SnapshotConfig::new(CACHE)),
pallets: vec!["Crowdloan".to_owned()],
..Default::default()
}))
.build()
.await
let to_delete = std::fs::read_dir(Path::new("."))
.unwrap()
.into_iter()
.map(|d| d.unwrap())
.filter(|p| p.path().file_name().unwrap_or_default() == CACHE)
assert!(to_delete.len() == 1);
let to_delete = to_delete.first().unwrap();
assert!(std::fs::metadata(to_delete.path()).unwrap().size() > 1);
std::fs::remove_file(to_delete.path()).unwrap();
Liam Aharon
committed
#[tokio::test]
async fn can_build_big_pallet() {
if std::option_env!("TEST_WS").is_none() {
return
}
.mode(Mode::Online(OnlineConfig {
transport: endpoint().clone().into(),
pallets: vec!["Staking".to_owned()],
child_trie: false,
..Default::default()
}))
Liam Aharon
committed
#[tokio::test]
async fn can_fetch_all() {
if std::option_env!("TEST_WS").is_none() {
return
}
init_logger();
Builder::<Block>::new()
.mode(Mode::Online(OnlineConfig {
transport: endpoint().clone().into(),
..Default::default()
}))
.build()
.await
/// The parallel key scraper must return exactly the same keys as the sequential one, both for
/// a narrow prefix and for the whole key space.
#[tokio::test]
async fn can_fetch_in_parallel() {
    init_logger();

    let mut builder = Builder::<Block>::new().mode(Mode::Online(OnlineConfig {
        transport: endpoint().clone().into(),
        ..Default::default()
    }));
    builder.init_remote_client().await.unwrap();
    let at = builder.as_online().at.unwrap();

    // a narrow prefix, modest parallelism.
    let narrow = StorageKey(vec![13]);
    let sequential = builder.rpc_get_keys_in_range(&narrow, at, None, None).await.unwrap();
    let parallel = builder.rpc_get_keys_parallel(&narrow, at, 4).await.unwrap();
    assert_eq!(sequential, parallel);

    // the whole key space, more parallelism.
    let everything = StorageKey(vec![]);
    let sequential = builder.rpc_get_keys_in_range(&everything, at, None, None).await.unwrap();
    let parallel = builder.rpc_get_keys_parallel(&everything, at, 8).await.unwrap();
    assert_eq!(sequential, parallel);
}