Commit f6c3d4c6 authored by David's avatar David Committed by Andronik Ordian

Use upstream rocksdb (#11248)

* Use upstream rocksdb

…by way of https://github.com/paritytech/parity-common/pull/257 by @ordian.

* Hint at how `parity db reset` works in the error message

* migration-rocksdb: fix build

* Cargo.toml: use git dependency instead of path

* update to latest kvdb-rocksdb

* fix tests

* saner default for light client

* rename open_db to open_db_light

* update to latest kvdb-rocksdb

* moar update to latest kvdb-rocksdb

* even moar update to latest kvdb-rocksdb

* use kvdb-rocksdb from crates.io

* Update parity/db/rocksdb/helpers.rs

* add docs to memory_budget division
parent 2895e3b2
Pipeline #71039 passed with stages
in 48 minutes and 29 seconds
This diff is collapsed.
......@@ -40,7 +40,7 @@ journaldb = { path = "util/journaldb" }
jsonrpc-core = "14.0.3"
keccak-hash = "0.4.0"
kvdb = "0.1"
kvdb-rocksdb = "0.1.5"
kvdb-rocksdb = "0.2.0"
log = "0.4"
migration-rocksdb = { path = "util/migration-rocksdb" }
node-filter = { path = "ethcore/node-filter" }
......@@ -135,3 +135,4 @@ members = [
"evmbin",
"parity-clib",
]
......@@ -33,7 +33,7 @@ journaldb = { path = "../util/journaldb" }
keccak-hash = "0.4.0"
kvdb = "0.1"
kvdb-memorydb = { version = "0.1.2", optional = true }
kvdb-rocksdb = { version = "0.1.5", optional = true }
kvdb-rocksdb = { version = "0.2.0", optional = true }
lazy_static = { version = "1.3", optional = true }
log = "0.4"
macros = { path = "../util/macros", optional = true }
......@@ -80,7 +80,7 @@ ethjson = { path = "../json", features = ["test-helpers"] }
parity-crypto = { version = "0.4.2", features = ["publickey"] }
fetch = { path = "../util/fetch" }
kvdb-memorydb = "0.1.2"
kvdb-rocksdb = "0.1.5"
kvdb-rocksdb = "0.2.0"
lazy_static = "1.3"
machine = { path = "./machine", features = ["test-helpers"] }
macros = { path = "../util/macros" }
......
......@@ -23,5 +23,5 @@ trace-time = "0.1"
[dev-dependencies]
ethcore = { path = "..", features = ["test-helpers"] }
ethcore-db = { path = "../db" }
kvdb-rocksdb = "0.1.5"
kvdb-rocksdb = "0.2.0"
tempdir = "0.3"
......@@ -293,6 +293,7 @@ where
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use std::sync::Arc;
use std::{time, thread};
......@@ -314,10 +315,9 @@ mod tests {
let client_path = tempdir.path().join("client");
let snapshot_path = tempdir.path().join("snapshot");
let client_config = ClientConfig::default();
let mut client_db_config = DatabaseConfig::with_columns(NUM_COLUMNS);
client_db_config.memory_budget = client_config.db_cache_size;
client_db_config.memory_budget = HashMap::new();
client_db_config.compaction = CompactionProfile::auto(&client_path);
let client_db_handler = test_helpers::restoration_db_handler(client_db_config.clone());
......
......@@ -53,7 +53,7 @@ ethabi-contract = "9.0.0"
ethabi-derive = "9.0.1"
ethcore = { path = "..", features = ["test-helpers"] }
ethkey = { path = "../../accounts/ethkey" }
kvdb-rocksdb = { version = "0.1.5" }
kvdb-rocksdb = "0.2.0"
lazy_static = { version = "1.3" }
spec = { path = "../spec" }
tempdir = "0.3"
......
......@@ -24,7 +24,7 @@ journaldb = { path = "../../../util/journaldb" }
keccak-hash = "0.4.0"
keccak-hasher = { path = "../../../util/keccak-hasher" }
kvdb = "0.1"
kvdb-rocksdb = { version = "0.1.5" }
kvdb-rocksdb = "0.2.0"
log = "0.4.8"
parking_lot = "0.9"
parity-crypto = { version = "0.4.2", features = ["publickey"] }
......
......@@ -1297,9 +1297,9 @@ impl DatabaseRestore for Client {
impl BlockChainReset for Client {
fn reset(&self, num: u32) -> Result<(), String> {
if num as u64 > self.pruning_history() {
return Err("Attempting to reset to block with pruned state".into())
return Err(format!("Attempting to reset the chain {} blocks back failed: state is pruned (max available: {})", num, self.pruning_history()).into())
} else if num == 0 {
return Err("invalid number of blocks to reset".into())
return Err("0 is an invalid number of blocks to reset".into())
}
let mut blocks_to_delete = Vec::with_capacity(num as usize);
......
......@@ -205,9 +205,11 @@ fn execute_import_light(cmd: ImportBlockchain) -> Result<(), String> {
config.queue.verifier_settings = cmd.verifier_settings;
// initialize database.
let db = db::open_db(&client_path.to_str().expect("DB path could not be converted to string."),
&cmd.cache_config,
&cmd.compaction).map_err(|e| format!("Failed to open database: {:?}", e))?;
let db = db::open_db_light(
&client_path.to_str().expect("DB path could not be converted to string."),
&cmd.cache_config,
&cmd.compaction,
).map_err(|e| format!("Failed to open database: {:?}", e))?;
// TODO: could epoch signals be available at the end of the file?
let fetch = ::light::client::fetch::unavailable();
......
......@@ -19,7 +19,7 @@
#[path="rocksdb/mod.rs"]
mod impls;
pub use self::impls::{open_db, restoration_db_handler, migrate};
pub use self::impls::{open_db_light, restoration_db_handler, migrate};
#[cfg(feature = "secretstore")]
pub use self::impls::open_secretstore_db;
......@@ -14,8 +14,8 @@
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
use std::collections::HashMap;
use std::path::Path;
use ethcore_db::NUM_COLUMNS;
use ethcore::client::{ClientConfig, DatabaseCompactionProfile};
use super::kvdb_rocksdb::{CompactionProfile, DatabaseConfig};
......@@ -27,10 +27,42 @@ pub fn compaction_profile(profile: &DatabaseCompactionProfile, db_path: &Path) -
}
}
/// Spreads the `total` (in MiB) memory budget across the db columns.
/// If it's `None`, the default memory budget will be used for each column.
pub fn memory_per_column(total: Option<usize>) -> HashMap<Option<u32>, usize> {
	let mut budget_per_column = HashMap::new();
	// no explicit budget: return an empty map so every column keeps its default
	let budget = match total {
		Some(budget) => budget,
		None => return budget_per_column,
	};
	// the state column is by far the hottest: give it 90% of the budget,
	// but never less than 256 MiB
	budget_per_column.insert(ethcore_db::COL_STATE, std::cmp::max(budget * 9 / 10, 256));
	let num_columns = ethcore_db::NUM_COLUMNS.expect("NUM_COLUMNS is Some; qed");
	// split the remaining 10% evenly across the other columns,
	// with a floor of 16 MiB each
	let rest_budget = std::cmp::max(budget / 10 / (num_columns as usize - 1), 16);
	for col in 1..num_columns {
		budget_per_column.insert(Some(col), rest_budget);
	}
	budget_per_column
}
/// Spreads the `total` (in MiB) memory budget across the light db columns.
pub fn memory_per_column_light(total: usize) -> HashMap<Option<u32>, usize> {
	let num_columns = ethcore_db::NUM_COLUMNS.expect("NUM_COLUMNS is Some; qed");
	// the light client doesn't use the state column, so the whole budget is
	// split evenly across the remaining columns, with a floor of 4 MiB each
	let per_column = std::cmp::max(total / (num_columns as usize - 1), 4);
	(1..num_columns)
		.map(|col| (Some(col), per_column))
		.collect()
}
pub fn client_db_config(client_path: &Path, client_config: &ClientConfig) -> DatabaseConfig {
let mut client_db_config = DatabaseConfig::with_columns(NUM_COLUMNS);
let mut client_db_config = DatabaseConfig::with_columns(ethcore_db::NUM_COLUMNS);
client_db_config.memory_budget = client_config.db_cache_size;
client_db_config.memory_budget = memory_per_column(client_config.db_cache_size);
client_db_config.compaction = compaction_profile(&client_config.db_compaction, &client_path);
client_db_config
......
......@@ -224,9 +224,9 @@ pub fn migrate(path: &Path, compaction_profile: &DatabaseCompactionProfile) -> R
println!("Migrating blooms to blooms-db...");
let db_config = DatabaseConfig {
max_open_files: 64,
memory_budget: None,
compaction: compaction_profile,
columns: ethcore_db::NUM_COLUMNS,
..Default::default()
};
migrate_blooms(&db_path, &db_config).map_err(Error::BloomsDB)?;
......
......@@ -86,8 +86,8 @@ pub fn restoration_db_handler(client_path: &Path, client_config: &ClientConfig)
})
}
/// Open a new main DB.
pub fn open_db(
/// Open a new light client DB.
pub fn open_db_light(
client_path: &str,
cache_config: &CacheConfig,
compaction: &DatabaseCompactionProfile
......@@ -95,7 +95,7 @@ pub fn open_db(
let path = Path::new(client_path);
let db_config = DatabaseConfig {
memory_budget: Some(cache_config.blockchain() as usize * 1024 * 1024),
memory_budget: helpers::memory_per_column_light(cache_config.blockchain() as usize),
compaction: helpers::compaction_profile(&compaction, path),
.. DatabaseConfig::with_columns(NUM_COLUMNS)
};
......
......@@ -86,9 +86,11 @@ pub fn execute(cmd: ExportHsyncCmd) -> Result<String, String> {
config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024;
// initialize database.
let db = db::open_db(&db_dirs.client_path(algorithm).to_str().expect("DB path could not be converted to string."),
&cmd.cache_config,
&cmd.compaction).map_err(|e| format!("Failed to open database {:?}", e))?;
let db = db::open_db_light(
&db_dirs.client_path(algorithm).to_str().expect("DB path could not be converted to string."),
&cmd.cache_config,
&cmd.compaction,
).map_err(|e| format!("Failed to open database {:?}", e))?;
let service = light_client::Service::start(config, &spec, UnavailableDataFetcher, db, cache)
.map_err(|e| format!("Error starting light client: {}", e))?;
......
......@@ -253,9 +253,11 @@ fn execute_light_impl<Cr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq
};
// initialize database.
let db = db::open_db(&db_dirs.client_path(algorithm).to_str().expect("DB path could not be converted to string."),
&cmd.cache_config,
&cmd.compaction).map_err(|e| format!("Failed to open database {:?}", e))?;
let db = db::open_db_light(
&db_dirs.client_path(algorithm).to_str().expect("DB path could not be converted to string."),
&cmd.cache_config,
&cmd.compaction,
).map_err(|e| format!("Failed to open database {:?}", e))?;
let service = light_client::Service::start(config, &spec, fetch, db, cache.clone())
.map_err(|e| format!("Error starting light client: {}", e))?;
......
......@@ -45,7 +45,7 @@ env_logger = "0.5"
ethkey = { path = "../accounts/ethkey" }
ethcore = { path = "../ethcore", features = ["test-helpers"] }
tempdir = "0.3"
kvdb-rocksdb = "0.1.5"
kvdb-rocksdb = "0.2.0"
[features]
accounts = ["ethcore-accounts"]
......@@ -7,7 +7,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
log = "0.4"
macros = { path = "../macros" }
kvdb = "0.1"
kvdb-rocksdb = "0.1.5"
kvdb-rocksdb = "0.2.0"
[dev-dependencies]
tempdir = "0.3"
......@@ -134,12 +134,7 @@ impl<T: SimpleMigration> Migration for T {
let migration_needed = col == SimpleMigration::migrated_column_index(self);
let mut batch = Batch::new(config, col);
let iter = match source.iter(col) {
Some(iter) => iter,
None => return Ok(()),
};
for (key, value) in iter {
for (key, value) in source.iter(col) {
if migration_needed {
if let Some((key, value)) = self.simple_migrate(key.into_vec(), value.into_vec()) {
batch.insert(key, value, dest)?;
......@@ -249,9 +244,9 @@ impl Manager {
trace!(target: "migration", "Expecting database to contain {:?} columns", columns);
let mut db_config = DatabaseConfig {
max_open_files: 64,
memory_budget: None,
compaction: config.compaction_profile,
columns: columns,
columns,
..Default::default()
};
let db_root = database_path(old_path);
......
......@@ -116,7 +116,7 @@ impl Migration for AddsColumn {
fn migrate(&mut self, source: Arc<Database>, config: &Config, dest: &mut Database, col: Option<u32>) -> io::Result<()> {
let mut batch = Batch::new(config, col);
for (key, value) in source.iter(col).into_iter().flat_map(|inner| inner) {
for (key, value) in source.iter(col) {
batch.insert(key.into_vec(), value.into_vec(), dest)?;
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment