diff --git a/Cargo.lock b/Cargo.lock
index 074b657e767b1cc121a471f5aee662fc2290ebda..9e52bfcf9a429ab08026f7da577b8d982281bf20 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -13644,6 +13644,7 @@ dependencies = [
  "sc-service",
  "schnorrkel 0.11.4",
  "serde",
+ "serde_json",
  "serde_yaml",
  "sha1",
  "sp-application-crypto",
diff --git a/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs b/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs
index 019eb1222082a275b1629de6dedcc69e3437dd8a..c33674a8f2f926ad0a186bc867aabe843f091e32 100644
--- a/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs
+++ b/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs
@@ -27,6 +27,7 @@ use polkadot_subsystem_bench::{
 	availability::{benchmark_availability_write, prepare_test, TestState},
 	configuration::TestConfiguration,
 	usage::BenchmarkUsage,
+	utils::save_to_file,
 };
 use std::io::Write;
 
@@ -60,7 +61,13 @@ fn main() -> Result<(), String> {
 		})
 		.collect();
 	println!("\rDone!{}", " ".repeat(BENCH_COUNT));
+
 	let average_usage = BenchmarkUsage::average(&usages);
+	save_to_file(
+		"charts/availability-distribution-regression-bench.json",
+		average_usage.to_chart_json().map_err(|e| e.to_string())?,
+	)
+	.map_err(|e| e.to_string())?;
 	println!("{}", average_usage);
 
 	// We expect no variance for received and sent
diff --git a/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs b/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs
index 5e8b81be82dd2b1e06a2b17b6e9c8c9a8fa24675..46a38516898f2eea48602d951e928f94838c592f 100644
--- a/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs
+++ b/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs
@@ -28,6 +28,7 @@ use polkadot_subsystem_bench::{
 	},
 	configuration::TestConfiguration,
 	usage::BenchmarkUsage,
+	utils::save_to_file,
 };
 use std::io::Write;
 
@@ -58,7 +59,13 @@ fn main() -> Result<(), String> {
 		})
 		.collect();
 	println!("\rDone!{}", " ".repeat(BENCH_COUNT));
+
 	let average_usage = BenchmarkUsage::average(&usages);
+	save_to_file(
+		"charts/availability-recovery-regression-bench.json",
+		average_usage.to_chart_json().map_err(|e| e.to_string())?,
+	)
+	.map_err(|e| e.to_string())?;
 	println!("{}", average_usage);
 
 	// We expect no variance for received and sent
diff --git a/polkadot/node/subsystem-bench/Cargo.toml b/polkadot/node/subsystem-bench/Cargo.toml
index 05907e428f92ac9f2b44fb923f33eb1aa7b32492..b494f05180d1b06f740058cc368c63b0d9552277 100644
--- a/polkadot/node/subsystem-bench/Cargo.toml
+++ b/polkadot/node/subsystem-bench/Cargo.toml
@@ -71,6 +71,7 @@ prometheus_endpoint = { package = "substrate-prometheus-endpoint", path = "../..
 prometheus = { version = "0.13.0", default-features = false }
 serde = { workspace = true, default-features = true }
 serde_yaml = { workspace = true }
+serde_json = { workspace = true }
 
 polkadot-node-core-approval-voting = { path = "../core/approval-voting" }
 polkadot-approval-distribution = { path = "../network/approval-distribution" }
diff --git a/polkadot/node/subsystem-bench/src/lib/environment.rs b/polkadot/node/subsystem-bench/src/lib/environment.rs
index 2d80d75a14aaa275327d61f7f5fa187cf703e9ec..42955d0302232f35e387a044e34a0c7d665512e8 100644
--- a/polkadot/node/subsystem-bench/src/lib/environment.rs
+++ b/polkadot/node/subsystem-bench/src/lib/environment.rs
@@ -404,7 +404,7 @@ impl TestEnvironment {
 
 		let total_cpu = test_env_cpu_metrics.sum_by("substrate_tasks_polling_duration_sum");
 		usage.push(ResourceUsage {
-			resource_name: "Test environment".to_string(),
+			resource_name: "test-environment".to_string(),
 			total: total_cpu,
 			per_block: total_cpu / num_blocks,
 		});
diff --git a/polkadot/node/subsystem-bench/src/lib/lib.rs b/polkadot/node/subsystem-bench/src/lib/lib.rs
index d06f2822a8958169a15f449e71251e3cc62eb9d6..ef2724abc98920c79d8dd9d94f97bed32b0ab8e2 100644
--- a/polkadot/node/subsystem-bench/src/lib/lib.rs
+++ b/polkadot/node/subsystem-bench/src/lib/lib.rs
@@ -26,3 +26,4 @@ pub(crate) mod keyring;
 pub(crate) mod mock;
 pub(crate) mod network;
 pub mod usage;
+pub mod utils;
diff --git a/polkadot/node/subsystem-bench/src/lib/usage.rs b/polkadot/node/subsystem-bench/src/lib/usage.rs
index 7172969a8f920b0863753460a0dac0a1d8e90fa4..59296746ec3d4154274ce68d9ee910bb61d0f9f8 100644
--- a/polkadot/node/subsystem-bench/src/lib/usage.rs
+++ b/polkadot/node/subsystem-bench/src/lib/usage.rs
@@ -82,6 +82,27 @@ impl BenchmarkUsage {
 			_ => None,
 		}
 	}
+
+	// Prepares a json string for a graph representation
+	// See: https://github.com/benchmark-action/github-action-benchmark?tab=readme-ov-file#examples
+	pub fn to_chart_json(&self) -> color_eyre::eyre::Result<String> {
+		let chart = self
+			.network_usage
+			.iter()
+			.map(|v| ChartItem {
+				name: v.resource_name.clone(),
+				unit: "KiB".to_string(),
+				value: v.per_block,
+			})
+			.chain(self.cpu_usage.iter().map(|v| ChartItem {
+				name: v.resource_name.clone(),
+				unit: "seconds".to_string(),
+				value: v.per_block,
+			}))
+			.collect::<Vec<_>>();
+
+		Ok(serde_json::to_string(&chart)?)
+	}
 }
 
 fn check_usage(
@@ -151,3 +172,10 @@ impl ResourceUsage {
 }
 
 type ResourceUsageCheck<'a> = (&'a str, f64, f64);
+
+#[derive(Debug, Serialize)]
+pub struct ChartItem {
+	pub name: String,
+	pub unit: String,
+	pub value: f64,
+}
diff --git a/polkadot/node/subsystem-bench/src/lib/utils.rs b/polkadot/node/subsystem-bench/src/lib/utils.rs
index cd206d8f32233b8166d05cd6790e5837b045d6ee..b3cd3a88b6c1324ec18a402cbc92524f4865380f 100644
--- a/polkadot/node/subsystem-bench/src/lib/utils.rs
+++ b/polkadot/node/subsystem-bench/src/lib/utils.rs
@@ -16,61 +16,26 @@
 
 //! Test utils
 
-use crate::usage::BenchmarkUsage;
-use std::io::{stdout, Write};
-
-pub struct WarmUpOptions<'a> {
-	/// The maximum number of runs considered for warming up.
-	pub warm_up: usize,
-	/// The number of runs considered for benchmarking.
-	pub bench: usize,
-	/// The difference in CPU usage between runs considered as normal
-	pub precision: f64,
-	/// The subsystems whose CPU usage is checked during warm-up cycles
-	pub subsystems: &'a [&'a str],
-}
-
-impl<'a> WarmUpOptions<'a> {
-	pub fn new(subsystems: &'a [&'a str]) -> Self {
-		Self { warm_up: 100, bench: 3, precision: 0.02, subsystems }
-	}
-}
-
-pub fn warm_up_and_benchmark(
-	options: WarmUpOptions,
-	run: impl Fn() -> BenchmarkUsage,
-) -> Result<BenchmarkUsage, String> {
-	println!("Warming up...");
-	let mut usages = Vec::with_capacity(options.bench);
-
-	for n in 1..=options.warm_up {
-		let curr = run();
-		if let Some(prev) = usages.last() {
-			let diffs = options
-				.subsystems
-				.iter()
-				.map(|&v| {
-					curr.cpu_usage_diff(prev, v)
-						.ok_or(format!("{} not found in benchmark {:?}", v, prev))
-				})
-				.collect::<Result<Vec<f64>, String>>()?;
-			if !diffs.iter().all(|&v| v < options.precision) {
-				usages.clear();
-			}
-		}
-		usages.push(curr);
-		print!("\r{}%", n * 100 / options.warm_up);
-		if usages.len() == options.bench {
-			println!("\rTook {} runs to warm up", n.saturating_sub(options.bench));
-			break;
-		}
-		stdout().flush().unwrap();
-	}
-
-	if usages.len() != options.bench {
-		println!("Didn't warm up after {} runs", options.warm_up);
-		return Err("Can't warm up".to_string())
+use std::{fs::File, io::Write};
+
+// Saves a given string to a file
+pub fn save_to_file(path: &str, value: String) -> color_eyre::eyre::Result<()> {
+	let output = std::process::Command::new(env!("CARGO"))
+		.arg("locate-project")
+		.arg("--workspace")
+		.arg("--message-format=plain")
+		.output()
+		.unwrap()
+		.stdout;
+	let workspace_dir = std::path::Path::new(std::str::from_utf8(&output).unwrap().trim())
+		.parent()
+		.unwrap();
+	let path = workspace_dir.join(path);
+	if let Some(dir) = path.parent() {
+		std::fs::create_dir_all(dir)?;
 	}
+	let mut file = File::create(path)?;
+	file.write_all(value.as_bytes())?;
 
-	Ok(BenchmarkUsage::average(&usages))
+	Ok(())
 }
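For reference, the ChartItem records produced by to_chart_json serialize into the flat name/unit/value array consumed by the github-action-benchmark custom tooling linked in the code comment above. A minimal standalone Rust sketch of that output, using illustrative resource names and per-block values rather than data from a real benchmark run:

use serde::Serialize;

// Mirrors the `ChartItem` struct added to usage.rs above.
#[derive(Debug, Serialize)]
struct ChartItem {
	name: String,
	unit: String,
	value: f64,
}

fn main() -> serde_json::Result<()> {
	// Hypothetical per-block figures; real entries come from BenchmarkUsage::average.
	let chart = vec![
		ChartItem { name: "availability-distribution".to_string(), unit: "seconds".to_string(), value: 0.05 },
		ChartItem { name: "test-environment".to_string(), unit: "seconds".to_string(), value: 0.01 },
	];
	// Prints e.g. [{"name":"availability-distribution","unit":"seconds","value":0.05},...],
	// which is the JSON written to the charts/*.json files by the regression benches.
	println!("{}", serde_json::to_string(&chart)?);
	Ok(())
}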