Unverified commit a2a049db, authored by Andrei Eres and committed by GitHub

[subsystem-benchmark] Add approval-voting benchmark to CI (#4216)


Co-authored-by: alvicsam <alvicsam@gmail.com>
parent 921265ca
Pipeline #468229 failed in 1 hour, 6 minutes, and 57 seconds
@@ -74,6 +74,8 @@ publish-subsystem-benchmarks:
       artifacts: true
     - job: subsystem-benchmark-availability-distribution
       artifacts: true
+    - job: subsystem-benchmark-approval-voting
+      artifacts: true
     - job: publish-rustdoc
       artifacts: false
   script:
@@ -115,6 +117,8 @@ trigger_workflow:
       artifacts: true
     - job: subsystem-benchmark-availability-distribution
       artifacts: true
+    - job: subsystem-benchmark-approval-voting
+      artifacts: true
   script:
     - echo "Triggering workflow"
     - >
...
@@ -511,7 +511,7 @@ test-syscalls:
       fi
   allow_failure: false # this rarely triggers in practice

-subsystem-benchmark-availability-recovery:
+.subsystem-benchmark-template:
   stage: test
   artifacts:
     name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}"
@@ -523,26 +523,26 @@ subsystem-benchmark-availability-recovery:
     - .docker-env
     - .common-refs
     - .run-immediately
-  script:
-    - cargo bench -p polkadot-availability-recovery --bench availability-recovery-regression-bench --features subsystem-benchmarks
   tags:
     - benchmark

+subsystem-benchmark-availability-recovery:
+  extends:
+    - .subsystem-benchmark-template
+  script:
+    - cargo bench -p polkadot-availability-recovery --bench availability-recovery-regression-bench --features subsystem-benchmarks
   allow_failure: true

 subsystem-benchmark-availability-distribution:
-  stage: test
-  artifacts:
-    name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}"
-    when: always
-    expire_in: 1 hour
-    paths:
-      - charts/
   extends:
-    - .docker-env
-    - .common-refs
-    - .run-immediately
+    - .subsystem-benchmark-template
   script:
     - cargo bench -p polkadot-availability-distribution --bench availability-distribution-regression-bench --features subsystem-benchmarks
-  tags:
-    - benchmark
+  allow_failure: true
+
+subsystem-benchmark-approval-voting:
+  extends:
+    - .subsystem-benchmark-template
+  script:
+    - cargo bench -p polkadot-node-core-approval-voting --bench approval-voting-regression-bench --features subsystem-benchmarks
   allow_failure: true
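The refactor above pulls everything the three benchmark jobs share (the test stage, the charts/ artifacts kept for one hour, the .docker-env/.common-refs/.run-immediately includes, and the benchmark runner tag) into a hidden .subsystem-benchmark-template job. Each concrete job now only extends the template and supplies its own cargo bench line, so wiring up a further subsystem benchmark stays a three-line job definition.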
@@ -13017,6 +13017,7 @@ dependencies = [
  "polkadot-overseer",
  "polkadot-primitives",
  "polkadot-primitives-test-helpers",
+ "polkadot-subsystem-bench",
  "rand 0.8.5",
  "rand_chacha 0.3.1",
  "rand_core 0.6.4",
...
@@ -53,3 +53,14 @@ kvdb-memorydb = "0.13.0"
 test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" }
 log = { workspace = true, default-features = true }
 env_logger = "0.11"
+polkadot-subsystem-bench = { path = "../../subsystem-bench" }
+
+[[bench]]
+name = "approval-voting-regression-bench"
+path = "benches/approval-voting-regression-bench.rs"
+harness = false
+required-features = ["subsystem-benchmarks"]
+
+[features]
+subsystem-benchmarks = []
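With harness = false, Cargo builds the benchmark as a plain binary and runs its main() directly rather than through the default test harness, and required-features keeps the target out of ordinary cargo bench runs. The CI jobs above therefore invoke it explicitly: cargo bench -p polkadot-node-core-approval-voting --bench approval-voting-regression-bench --features subsystem-benchmarks. The new benchmark source follows.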
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! approval-voting throughput test
//!
//! Approval Voting benchmark based on Kusama parameters and scale.
//!
//! Subsystems involved:
//! - approval-distribution
//! - approval-voting
use polkadot_subsystem_bench::{
    self,
    approval::{bench_approvals, prepare_test, ApprovalsOptions},
    configuration::TestConfiguration,
    usage::BenchmarkUsage,
    utils::save_to_file,
};
use std::io::Write;

const BENCH_COUNT: usize = 10;
fn main() -> Result<(), String> {
    let mut messages = vec![];

    // Kusama-like scale: 100 cores and 500 validators over 10 blocks.
    let mut config = TestConfiguration::default();
    config.n_cores = 100;
    config.n_validators = 500;
    config.num_blocks = 10;
    // Very high bandwidth and no artificial latency, so the network does not
    // become the bottleneck and the measurement reflects subsystem cost.
    config.peer_bandwidth = 524288000000;
    config.bandwidth = 524288000000;
    config.latency = None;
    config.connectivity = 100;
    config.generate_pov_sizes();

    let options = ApprovalsOptions {
        last_considered_tranche: 89,
        coalesce_mean: 3.0,
        coalesce_std_dev: 1.0,
        coalesce_tranche_diff: 12,
        enable_assignments_v2: true,
        stop_when_approved: false,
        workdir_prefix: "/tmp".to_string(),
        num_no_shows_per_candidate: 0,
    };
println!("Benchmarking...");
let usages: Vec<BenchmarkUsage> = (0..BENCH_COUNT)
.map(|n| {
print!("\r[{}{}]", "#".repeat(n), "_".repeat(BENCH_COUNT - n));
std::io::stdout().flush().unwrap();
let (mut env, state) = prepare_test(config.clone(), options.clone(), false);
env.runtime().block_on(bench_approvals("approvals_throughput", &mut env, state))
})
.collect();
println!("\rDone!{}", " ".repeat(BENCH_COUNT));
let average_usage = BenchmarkUsage::average(&usages);
save_to_file(
"charts/availability-distribution-regression-bench.json",
average_usage.to_chart_json().map_err(|e| e.to_string())?,
)
.map_err(|e| e.to_string())?;
println!("{}", average_usage);
    // We expect no variance in the number of messages received and sent,
    // but use a 0.001 tolerance because we operate with floats.
    messages.extend(average_usage.check_network_usage(&[
        ("Received from peers", 52944.7000, 0.001),
        ("Sent to peers", 63532.2000, 0.001),
    ]));
    // CPU usage varies between runs, so a much wider tolerance is allowed here.
    messages.extend(average_usage.check_cpu_usage(&[
        ("approval-distribution", 7.7883, 0.1),
        ("approval-voting", 10.4655, 0.1),
    ]));

    if messages.is_empty() {
        Ok(())
    } else {
        eprintln!("{}", messages.join("\n"));
        Err("Regressions found".to_string())
    }
}
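The tolerances above are what gate the CI job: each check compares the averaged measurement against a recorded baseline and collects a message when it drifts too far, and any message fails the run. A minimal sketch of such a check, assuming the tolerance is a relative bound (the real implementation lives in polkadot-subsystem-bench's BenchmarkUsage and may differ in detail; this helper is hypothetical):

// Hypothetical helper, not the real BenchmarkUsage API: returns a message
// when `measured` deviates from `baseline` by more than `tolerance`,
// interpreted here as a relative bound.
fn check_within_tolerance(
    label: &str,
    baseline: f64,
    tolerance: f64,
    measured: f64,
) -> Option<String> {
    // Relative deviation of the averaged measurement from the baseline.
    let deviation = (measured - baseline).abs() / baseline;
    if deviation > tolerance {
        Some(format!(
            "{label}: measured {measured:.4} deviates by {:.4} from baseline {baseline:.4} (allowed {tolerance})",
            deviation
        ))
    } else {
        None
    }
}

Under that reading, the 0.001 bound on the network counters is effectively an equality check on float-accumulated totals, while the CPU-time checks get a far wider band to absorb run-to-run scheduling noise.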