Commit cdc59db5 authored by Shawn Tabrizi, committed by GitHub

Make Benchmark Output Analysis Function Configurable (#8228)


* Integrate `output-analysis`

* fix test

* use default

* cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=frame_system --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/system/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* Update frame/system/src/weights.rs

* cargo run --release --features runtime-benchmarks --manifest-path bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=frame_system --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/system/src/weights.rs --template=./.maintain/frame-weight-template.hbs --output-analysis=max

* Update frame/system/src/weights.rs

* don't discard value_dist and model

* feedback

Co-authored-by: Parity Benchmarking Bot <admin@parity.io>
parent 7c1dd95e
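As a minimal, self-contained sketch (not part of the diff below), this is how the new --output-analysis value is expected to map onto the AnalysisChoice enum introduced in this commit. It assumes the crate is built with the std feature, so that frame_benchmarking re-exports AnalysisChoice:

// Sketch only: relies on the `AnalysisChoice` enum and its
// `TryFrom<Option<String>>` impl added in this commit.
use core::convert::TryFrom;
use frame_benchmarking::AnalysisChoice;

fn main() {
    // No `--output-analysis` flag given: fall back to the default (min-squares).
    assert!(matches!(AnalysisChoice::try_from(None::<String>), Ok(AnalysisChoice::MinSquares)));
    // `--output-analysis=max`: take the max of median-slopes and min-squares per value.
    assert!(matches!(AnalysisChoice::try_from(Some("max".to_string())), Ok(AnalysisChoice::Max)));
    // Any other string is rejected with an "invalid analysis string" error.
    assert!(AnalysisChoice::try_from(Some("foo".to_string())).is_err());
}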
@@ -18,6 +18,7 @@
//! Tools for analyzing the benchmark results.
use std::collections::BTreeMap;
use core::convert::TryFrom;
use linregress::{FormulaRegressionBuilder, RegressionDataBuilder};
use crate::BenchmarkResults;
@@ -31,6 +32,7 @@ pub struct Analysis {
pub model: Option<RegressionModel>,
}
#[derive(Clone, Copy)]
pub enum BenchmarkSelector {
ExtrinsicTime,
StorageRootTime,
@@ -38,6 +40,40 @@ pub enum BenchmarkSelector {
Writes,
}
#[derive(Debug)]
pub enum AnalysisChoice {
/// Use minimum squares regression for analyzing the benchmarking results.
MinSquares,
/// Use median slopes for analyzing the benchmarking results.
MedianSlopes,
/// Use the maximum values among all other analysis functions for the benchmarking results.
Max,
}
impl Default for AnalysisChoice {
fn default() -> Self {
AnalysisChoice::MinSquares
}
}
impl TryFrom<Option<String>> for AnalysisChoice {
type Error = &'static str;
fn try_from(s: Option<String>) -> Result<Self, Self::Error> {
match s {
None => Ok(AnalysisChoice::default()),
Some(i) => {
match &i[..] {
"min-squares" | "min_squares" => Ok(AnalysisChoice::MinSquares),
"median-slopes" | "median_slopes" => Ok(AnalysisChoice::MedianSlopes),
"max" => Ok(AnalysisChoice::Max),
_ => Err("invalid analysis string")
}
}
}
}
}
impl Analysis {
// Useful for when there are no components, and we just need a median value of the benchmark results.
// Note: We choose the median value because it is more robust to outliers.
@@ -215,6 +251,39 @@ impl Analysis {
model: Some(model),
})
}
pub fn max(r: &Vec<BenchmarkResults>, selector: BenchmarkSelector) -> Option<Self> {
let median_slopes = Self::median_slopes(r, selector);
let min_squares = Self::min_squares_iqr(r, selector);
if median_slopes.is_none() || min_squares.is_none() {
return None;
}
let median_slopes = median_slopes.unwrap();
let min_squares = min_squares.unwrap();
let base = median_slopes.base.max(min_squares.base);
let slopes = median_slopes.slopes.into_iter()
.zip(min_squares.slopes.into_iter())
.map(|(a, b): (u128, u128)| { a.max(b) })
.collect::<Vec<u128>>();
// components should always be in the same order
median_slopes.names.iter()
.zip(min_squares.names.iter())
.for_each(|(a, b)| assert!(a == b, "benchmark results not in the same order"));
let names = median_slopes.names;
let value_dists = min_squares.value_dists;
let model = min_squares.model;
Some(Self {
base,
slopes,
names,
value_dists,
model,
})
}
}
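To make the combination concrete, here is a small standalone sketch (not part of the commit) of the component-wise maximum that Analysis::max computes over the median-slopes and min-squares results; the numbers are invented for illustration:

fn main() {
    // Hypothetical base weights and per-component slopes from the two analyses.
    let median_slopes_base: u128 = 1_296_000;
    let min_squares_base: u128 = 1_345_000;
    let median_slopes_slopes: Vec<u128> = vec![612_000, 442_000];
    let min_squares_slopes: Vec<u128> = vec![619_000, 460_000];

    // Keep the larger base ...
    let base = median_slopes_base.max(min_squares_base);
    // ... and, per component, the larger of the two slopes.
    let slopes: Vec<u128> = median_slopes_slopes
        .into_iter()
        .zip(min_squares_slopes)
        .map(|(a, b)| a.max(b))
        .collect();

    assert_eq!(base, 1_345_000);
    assert_eq!(slopes, vec![619_000, 460_000]);
}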
fn ms(mut nanos: u128) -> String {
@@ -26,7 +26,7 @@ mod analysis;
pub use utils::*;
#[cfg(feature = "std")]
pub use analysis::{Analysis, BenchmarkSelector, RegressionModel};
pub use analysis::{Analysis, BenchmarkSelector, RegressionModel, AnalysisChoice};
#[doc(hidden)]
pub use sp_io::storage::root as storage_root;
#[doc(hidden)]
@@ -18,7 +18,7 @@
//! Autogenerated weights for frame_system
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
//! DATE: 2021-02-27, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: []
//! DATE: 2021-02-28, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: []
//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128
// Executed Command:
@@ -34,6 +34,7 @@
// --heap-pages=4096
// --output=./frame/system/src/weights.rs
// --template=./.maintain/frame-weight-template.hbs
// --output-analysis=max
#![allow(unused_parens)]
@@ -57,38 +58,38 @@ pub trait WeightInfo {
pub struct SubstrateWeight<T>(PhantomData<T>);
impl<T: crate::Config> WeightInfo for SubstrateWeight<T> {
fn remark(_b: u32, ) -> Weight {
(1_296_000 as Weight)
(1_345_000 as Weight)
}
fn remark_with_event(b: u32, ) -> Weight {
(13_474_000 as Weight)
(9_697_000 as Weight)
// Standard Error: 0
.saturating_add((1_000 as Weight).saturating_mul(b as Weight))
}
fn set_heap_pages() -> Weight {
(2_024_000 as Weight)
(2_070_000 as Weight)
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn set_changes_trie_config() -> Weight {
(10_551_000 as Weight)
(10_111_000 as Weight)
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
fn set_storage(i: u32, ) -> Weight {
(0 as Weight)
// Standard Error: 0
.saturating_add((612_000 as Weight).saturating_mul(i as Weight))
.saturating_add((619_000 as Weight).saturating_mul(i as Weight))
.saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight)))
}
fn kill_storage(i: u32, ) -> Weight {
(562_000 as Weight)
(1_647_000 as Weight)
// Standard Error: 0
.saturating_add((442_000 as Weight).saturating_mul(i as Weight))
.saturating_add((460_000 as Weight).saturating_mul(i as Weight))
.saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight)))
}
fn kill_prefix(p: u32, ) -> Weight {
(10_499_000 as Weight)
// Standard Error: 1_000
.saturating_add((840_000 as Weight).saturating_mul(p as Weight))
(10_678_000 as Weight)
// Standard Error: 0
.saturating_add((862_000 as Weight).saturating_mul(p as Weight))
.saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight)))
}
}
@@ -96,38 +97,38 @@ impl<T: crate::Config> WeightInfo for SubstrateWeight<T> {
// For backwards compatibility and tests
impl WeightInfo for () {
fn remark(_b: u32, ) -> Weight {
(1_296_000 as Weight)
(1_345_000 as Weight)
}
fn remark_with_event(b: u32, ) -> Weight {
(13_474_000 as Weight)
(9_697_000 as Weight)
// Standard Error: 0
.saturating_add((1_000 as Weight).saturating_mul(b as Weight))
}
fn set_heap_pages() -> Weight {
(2_024_000 as Weight)
(2_070_000 as Weight)
.saturating_add(RocksDbWeight::get().writes(1 as Weight))
}
fn set_changes_trie_config() -> Weight {
(10_551_000 as Weight)
(10_111_000 as Weight)
.saturating_add(RocksDbWeight::get().reads(1 as Weight))
.saturating_add(RocksDbWeight::get().writes(2 as Weight))
}
fn set_storage(i: u32, ) -> Weight {
(0 as Weight)
// Standard Error: 0
.saturating_add((612_000 as Weight).saturating_mul(i as Weight))
.saturating_add((619_000 as Weight).saturating_mul(i as Weight))
.saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight)))
}
fn kill_storage(i: u32, ) -> Weight {
(562_000 as Weight)
(1_647_000 as Weight)
// Standard Error: 0
.saturating_add((442_000 as Weight).saturating_mul(i as Weight))
.saturating_add((460_000 as Weight).saturating_mul(i as Weight))
.saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight)))
}
fn kill_prefix(p: u32, ) -> Weight {
(10_499_000 as Weight)
// Standard Error: 1_000
.saturating_add((840_000 as Weight).saturating_mul(p as Weight))
(10_678_000 as Weight)
// Standard Error: 0
.saturating_add((862_000 as Weight).saturating_mul(p as Weight))
.saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(p as Weight)))
}
}
@@ -72,6 +72,13 @@ pub struct BenchmarkCmd {
#[structopt(long)]
pub template: Option<std::path::PathBuf>,
/// Which analysis function to use when outputting benchmarks:
/// * min-squares (default)
/// * median-slopes
/// * max (max of min squares and median slopes for each value)
#[structopt(long)]
pub output_analysis: Option<String>,
/// Set the heap pages while running benchmarks.
#[structopt(long)]
pub heap_pages: Option<u64>,
@@ -20,12 +20,13 @@
use std::collections::HashMap;
use std::fs;
use std::path::PathBuf;
use core::convert::TryInto;
use serde::Serialize;
use inflector::Inflector;
use crate::BenchmarkCmd;
use frame_benchmarking::{BenchmarkBatch, BenchmarkSelector, Analysis, RegressionModel};
use frame_benchmarking::{BenchmarkBatch, BenchmarkSelector, Analysis, AnalysisChoice, RegressionModel};
use sp_runtime::traits::Zero;
const VERSION: &'static str = env!("CARGO_PKG_VERSION");
@@ -71,6 +72,7 @@ struct CmdData {
wasm_execution: String,
chain: String,
db_cache: u32,
analysis_choice: String,
}
// This encodes the component name and whether that component is used.
@@ -104,7 +106,10 @@ fn io_error(s: &str) -> std::io::Error {
// p1 -> [b1, b2, b3]
// p2 -> [b1, b2]
// ```
fn map_results(batches: &[BenchmarkBatch]) -> Result<HashMap<(String, String), Vec<BenchmarkData>>, std::io::Error> {
fn map_results(
batches: &[BenchmarkBatch],
analysis_choice: &AnalysisChoice,
) -> Result<HashMap<(String, String), Vec<BenchmarkData>>, std::io::Error> {
// Skip if batches is empty.
if batches.is_empty() { return Err(io_error("empty batches")) }
@@ -118,7 +123,7 @@ fn map_results(batches: &[BenchmarkBatch]) -> Result<HashMap<(String, String), V
let pallet_string = String::from_utf8(batch.pallet.clone()).unwrap();
let instance_string = String::from_utf8(batch.instance.clone()).unwrap();
let benchmark_data = get_benchmark_data(batch);
let benchmark_data = get_benchmark_data(batch, analysis_choice);
pallet_benchmarks.push(benchmark_data);
// Check if this is the end of the iterator
@@ -150,11 +155,23 @@ fn extract_errors(model: &Option<RegressionModel>) -> impl Iterator<Item=u128> +
}
// Analyze and return the relevant results for a given benchmark.
fn get_benchmark_data(batch: &BenchmarkBatch) -> BenchmarkData {
fn get_benchmark_data(
batch: &BenchmarkBatch,
analysis_choice: &AnalysisChoice,
) -> BenchmarkData {
// Analyze benchmarks to get the linear regression.
let extrinsic_time = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::ExtrinsicTime).unwrap();
let reads = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::Reads).unwrap();
let writes = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::Writes).unwrap();
let analysis_function = match analysis_choice {
AnalysisChoice::MinSquares => Analysis::min_squares_iqr,
AnalysisChoice::MedianSlopes => Analysis::median_slopes,
AnalysisChoice::Max => Analysis::max,
};
let extrinsic_time = analysis_function(&batch.results, BenchmarkSelector::ExtrinsicTime)
.expect("analysis function should return an extrinsic time for valid inputs");
let reads = analysis_function(&batch.results, BenchmarkSelector::Reads)
.expect("analysis function should return the number of reads for valid inputs");
let writes = analysis_function(&batch.results, BenchmarkSelector::Writes)
.expect("analysis function should return the number of writes for valid inputs");
// Analysis data may include components that are not used; this filters out anything whose value is zero.
let mut used_components = Vec::new();
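The match above evaluates to a plain function pointer because all three analysis functions share the same signature. A stripped-down sketch of that pattern (with made-up stand-in types, not the real Analysis API):

#[allow(dead_code)]
#[derive(Clone, Copy)]
enum Choice { MinSquares, MedianSlopes, Max }

fn min_squares(xs: &[u32]) -> Option<u32> { xs.iter().copied().min() }
fn median_slopes(xs: &[u32]) -> Option<u32> { xs.get(xs.len() / 2).copied() }
fn max(xs: &[u32]) -> Option<u32> { xs.iter().copied().max() }

fn main() {
    let choice = Choice::Max;
    // Every arm has the same signature, so the whole match coerces to a
    // single `fn(&[u32]) -> Option<u32>` pointer that is then called uniformly.
    let analysis_function: fn(&[u32]) -> Option<u32> = match choice {
        Choice::MinSquares => min_squares,
        Choice::MedianSlopes => median_slopes,
        Choice::Max => max,
    };
    assert_eq!(analysis_function(&[3, 1, 2]), Some(3));
}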
@@ -255,6 +272,11 @@ pub fn write_results(
// Full CLI args passed to trigger the benchmark.
let args = std::env::args().collect::<Vec<String>>();
// Which analysis function should be used when outputting benchmarks
let analysis_choice: AnalysisChoice = cmd.output_analysis.clone()
.try_into()
.map_err(|e| io_error(e))?;
// Capture individual args
let cmd_data = CmdData {
steps: cmd.steps.clone(),
@@ -265,6 +287,7 @@
wasm_execution: cmd.wasm_method.to_string(),
chain: format!("{:?}", cmd.shared_params.chain),
db_cache: cmd.database_cache_size,
analysis_choice: format!("{:?}", analysis_choice),
};
// New Handlebars instance with helpers.
@@ -275,7 +298,7 @@
handlebars.register_escape_fn(|s| -> String { s.to_string() });
// Organize results by pallet into a JSON map
let all_results = map_results(batches)?;
let all_results = map_results(batches, &analysis_choice)?;
for ((pallet, instance), results) in all_results.iter() {
let mut file_path = path.clone();
// If a user only specified a directory...
@@ -455,7 +478,7 @@ mod test {
test_data(b"first", b"first", BenchmarkParameter::a, 10, 3),
test_data(b"first", b"second", BenchmarkParameter::b, 9, 2),
test_data(b"second", b"first", BenchmarkParameter::c, 3, 4),
]).unwrap();
], &AnalysisChoice::default()).unwrap();
let first_benchmark = &mapped_results.get(
&("first_pallet".to_string(), "instance".to_string())