Unverified Commit 81b979ae authored by Maksym H, committed by GitHub

Mak cmd swap omnibench (#6769)


- change bench to default to old CLI
- fix profile to production

---------

Co-authored-by: GitHub Action <action@github.com>
Co-authored-by: command-bot <>
parent b2e1e592
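
For context, a minimal sketch of how the two subcommands are exercised after this change. This is a hypothetical invocation, not part of the commit: the workflow below runs `python3 .github/scripts/cmd/cmd.py $CMD $PR_ARG`, and the runtime/pallet values here are only examples.

import subprocess

# `/cmd bench ...` now routes to the old node-CLI benchmarking path
subprocess.run(
    ["python3", ".github/scripts/cmd/cmd.py", "bench",
     "--runtime", "westend", "--pallet", "pallet_balances"],
    check=True,
)

# `/cmd bench-omni ...` keeps the frame-omni-bencher path
subprocess.run(
    ["python3", ".github/scripts/cmd/cmd.py", "bench-omni",
     "--runtime", "westend", "--pallet", "pallet_balances"],
    check=True,
)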
@@ -58,7 +58,7 @@ bench_example = '''**Examples**:
%(prog)s --runtime westend rococo --pallet pallet_balances pallet_multisig --quiet --clean
'''
parser_bench = subparsers.add_parser('bench', help='Runs benchmarks', epilog=bench_example, formatter_class=argparse.RawDescriptionHelpFormatter)
parser_bench = subparsers.add_parser('bench', help='Runs benchmarks (old CLI)', epilog=bench_example, formatter_class=argparse.RawDescriptionHelpFormatter)
for arg, config in common_args.items():
parser_bench.add_argument(arg, **config)
@@ -67,6 +67,35 @@ parser_bench.add_argument('--runtime', help='Runtime(s) space separated', choice
parser_bench.add_argument('--pallet', help='Pallet(s) space separated', nargs='*', default=[])
parser_bench.add_argument('--fail-fast', help='Fail fast on first failed benchmark', action='store_true')
"""
BENCH OMNI
"""
bench_example = '''**Examples**:
Runs all benchmarks
%(prog)s
Runs benchmarks for pallet_balances and pallet_multisig for all runtimes which have these pallets. **--quiet** makes it output nothing to the PR except reactions
%(prog)s --pallet pallet_balances pallet_xcm_benchmarks::generic --quiet
Runs bench for all pallets for westend runtime and fails fast on first failed benchmark
%(prog)s --runtime westend --fail-fast
Does not output anything and cleans up the previous bot's and author's command-triggering comments in the PR
%(prog)s --runtime westend rococo --pallet pallet_balances pallet_multisig --quiet --clean
'''
parser_bench_old = subparsers.add_parser('bench-omni', help='Runs benchmarks (frame omni bencher)', epilog=bench_example, formatter_class=argparse.RawDescriptionHelpFormatter)
for arg, config in common_args.items():
parser_bench_old.add_argument(arg, **config)
parser_bench_old.add_argument('--runtime', help='Runtime(s) space separated', choices=runtimeNames, nargs='*', default=runtimeNames)
parser_bench_old.add_argument('--pallet', help='Pallet(s) space separated', nargs='*', default=[])
parser_bench_old.add_argument('--fail-fast', help='Fail fast on first failed benchmark', action='store_true')
"""
FMT
"""
@@ -98,12 +127,12 @@ def main():
print(f'args: {args}')
if args.command == 'bench':
if args.command == 'bench-omni':
runtime_pallets_map = {}
failed_benchmarks = {}
successful_benchmarks = {}
profile = "release"
profile = "production"
print(f'Provided runtimes: {args.runtime}')
# convert to mapped dict
@@ -113,11 +142,22 @@ def main():
# loop over remaining runtimes to collect available pallets
for runtime in runtimesMatrix.values():
os.system(f"forklift cargo build -p {runtime['package']} --profile {profile} --features={runtime['bench_features']}")
build_command = f"forklift cargo build -p {runtime['package']} --profile {profile} --features={runtime['bench_features']}"
print(f'-- building "{runtime["name"]}" with `{build_command}`')
os.system(build_command)
print(f'-- listing pallets for benchmark for {runtime["name"]}')
wasm_file = f"target/{profile}/wbuild/{runtime['package']}/{runtime['package'].replace('-', '_')}.wasm"
output = os.popen(
f"frame-omni-bencher v1 benchmark pallet --no-csv-header --no-storage-info --no-min-squares --no-median-slopes --all --list --runtime={wasm_file} {runtime['bench_flags']}").read()
list_command = f"frame-omni-bencher v1 benchmark pallet " \
f"--no-csv-header " \
f"--no-storage-info " \
f"--no-min-squares " \
f"--no-median-slopes " \
f"--all " \
f"--list " \
f"--runtime={wasm_file} " \
f"{runtime['bench_flags']}"
print(f'-- running: {list_command}')
output = os.popen(list_command).read()
raw_pallets = output.strip().split('\n')
all_pallets = set()
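
A small illustration of the parsing step above. The exact `--list` output format is an assumption here (one "pallet, extrinsic" pair per line), which is why the script keeps only the text before the first comma.

sample_output = """pallet_balances, transfer_allow_death
pallet_balances, force_transfer
pallet_staking, bond"""
all_pallets = set()
for line in sample_output.strip().split('\n'):
    if line:
        all_pallets.add(line.split(',')[0].strip())
print(sorted(all_pallets))  # ['pallet_balances', 'pallet_staking']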
@@ -230,6 +270,149 @@ def main():
print_and_log('✅ Successful benchmarks of runtimes/pallets:')
for runtime, pallets in successful_benchmarks.items():
print_and_log(f'-- {runtime}: {pallets}')
if args.command == 'bench':
runtime_pallets_map = {}
failed_benchmarks = {}
successful_benchmarks = {}
profile = "production"
print(f'Provided runtimes: {args.runtime}')
# convert to mapped dict
runtimesMatrix = list(filter(lambda x: x['name'] in args.runtime, runtimesMatrix))
runtimesMatrix = {x['name']: x for x in runtimesMatrix}
print(f'Filtered out runtimes: {runtimesMatrix}')
# loop over remaining runtimes to collect available pallets
for runtime in runtimesMatrix.values():
build_command = f"forklift cargo build -p {runtime['old_package']} --profile {profile} --features={runtime['bench_features']} --locked"
print(f'-- building {runtime["name"]} with `{build_command}`')
os.system(build_command)
chain = runtime['name'] if runtime['name'] == 'dev' else f"{runtime['name']}-dev"
machine_test = f"target/{profile}/{runtime['old_bin']} benchmark machine --chain={chain}"
print(f"Running machine test for `{machine_test}`")
os.system(machine_test)
print(f'-- listing pallets for benchmark for {chain}')
list_command = f"target/{profile}/{runtime['old_bin']} " \
f"benchmark pallet " \
f"--no-csv-header " \
f"--no-storage-info " \
f"--no-min-squares " \
f"--no-median-slopes " \
f"--all " \
f"--list " \
f"--chain={chain}"
print(f'-- running: {list_command}')
output = os.popen(list_command).read()
raw_pallets = output.strip().split('\n')
all_pallets = set()
for pallet in raw_pallets:
if pallet:
all_pallets.add(pallet.split(',')[0].strip())
pallets = list(all_pallets)
print(f'Pallets in {runtime["name"]}: {pallets}')
runtime_pallets_map[runtime['name']] = pallets
print(f'\n')
# filter out only the specified pallets from collected runtimes/pallets
if args.pallet:
print(f'Pallets: {args.pallet}')
new_pallets_map = {}
# keep only specified pallets if they exist in the runtime
for runtime in runtime_pallets_map:
if set(args.pallet).issubset(set(runtime_pallets_map[runtime])):
new_pallets_map[runtime] = args.pallet
runtime_pallets_map = new_pallets_map
print(f'Filtered out runtimes & pallets: {runtime_pallets_map}\n')
if not runtime_pallets_map:
if args.pallet and not args.runtime:
print(f"No pallets {args.pallet} found in any runtime")
elif args.runtime and not args.pallet:
print(f"{args.runtime} runtime does not have any pallets")
elif args.runtime and args.pallet:
print(f"No pallets {args.pallet} found in {args.runtime}")
else:
print('No runtimes found')
sys.exit(1)
for runtime in runtime_pallets_map:
for pallet in runtime_pallets_map[runtime]:
config = runtimesMatrix[runtime]
header_path = os.path.abspath(config['header'])
template = None
chain = config['name'] if runtime == 'dev' else f"{config['name']}-dev"
print(f'-- config: {config}')
if runtime == 'dev':
# to support sub-modules (https://github.com/paritytech/command-bot/issues/275)
search_manifest_path = f"cargo metadata --locked --format-version 1 --no-deps | jq -r '.packages[] | select(.name == \"{pallet.replace('_', '-')}\") | .manifest_path'"
print(f'-- running: {search_manifest_path}')
manifest_path = os.popen(search_manifest_path).read()
if not manifest_path:
print(f'-- pallet {pallet} not found in dev runtime')
if args.fail_fast:
print_and_log(f'Error: {pallet} not found in dev runtime')
sys.exit(1)
package_dir = os.path.dirname(manifest_path)
print(f'-- package_dir: {package_dir}')
print(f'-- manifest_path: {manifest_path}')
output_path = os.path.join(package_dir, "src", "weights.rs")
template = config['template']
else:
default_path = f"./{config['path']}/src/weights"
xcm_path = f"./{config['path']}/src/weights/xcm"
output_path = default_path
if pallet.startswith("pallet_xcm_benchmarks"):
template = config['template']
output_path = xcm_path
print(f'-- benchmarking {pallet} in {runtime} into {output_path}')
cmd = f"target/{profile}/{config['old_bin']} benchmark pallet " \
f"--extrinsic=* " \
f"--chain={chain} " \
f"--pallet={pallet} " \
f"--header={header_path} " \
f"--output={output_path} " \
f"--wasm-execution=compiled " \
f"--steps=50 " \
f"--repeat=20 " \
f"--heap-pages=4096 " \
f"{f'--template={template} ' if template else ''}" \
f"--no-storage-info --no-min-squares --no-median-slopes "
print(f'-- Running: {cmd} \n')
status = os.system(cmd)
if status != 0 and args.fail_fast:
print_and_log(f'❌ Failed to benchmark {pallet} in {runtime}')
sys.exit(1)
# Otherwise collect failed benchmarks and print them at the end
# push failed pallets to failed_benchmarks
if status != 0:
failed_benchmarks[f'{runtime}'] = failed_benchmarks.get(f'{runtime}', []) + [pallet]
else:
successful_benchmarks[f'{runtime}'] = successful_benchmarks.get(f'{runtime}', []) + [pallet]
if failed_benchmarks:
print_and_log('❌ Failed benchmarks of runtimes/pallets:')
for runtime, pallets in failed_benchmarks.items():
print_and_log(f'-- {runtime}: {pallets}')
if successful_benchmarks:
print_and_log('✅ Successful benchmarks of runtimes/pallets:')
for runtime, pallets in successful_benchmarks.items():
print_and_log(f'-- {runtime}: {pallets}')
elif args.command == 'fmt':
command = f"cargo +nightly fmt"
@@ -47,7 +47,7 @@ mock_runtimes_matrix = [
def get_mock_bench_output(runtime, pallets, output_path, header, bench_flags, template = None):
return f"frame-omni-bencher v1 benchmark pallet --extrinsic=* " \
f"--runtime=target/release/wbuild/{runtime}-runtime/{runtime.replace('-', '_')}_runtime.wasm " \
f"--runtime=target/production/wbuild/{runtime}-runtime/{runtime.replace('-', '_')}_runtime.wasm " \
f"--pallet={pallets} --header={header} " \
f"--output={output_path} " \
f"--wasm-execution=compiled " \
@@ -93,7 +93,7 @@ class TestCmd(unittest.TestCase):
def test_bench_command_normal_execution_all_runtimes(self):
self.mock_parse_args.return_value = (argparse.Namespace(
command='bench',
command='bench-omni',
runtime=list(map(lambda x: x['name'], mock_runtimes_matrix)),
pallet=['pallet_balances'],
fail_fast=True,
@@ -117,10 +117,10 @@ class TestCmd(unittest.TestCase):
expected_calls = [
# Build calls
call("forklift cargo build -p kitchensink-runtime --profile release --features=runtime-benchmarks"),
call("forklift cargo build -p westend-runtime --profile release --features=runtime-benchmarks"),
call("forklift cargo build -p rococo-runtime --profile release --features=runtime-benchmarks"),
call("forklift cargo build -p asset-hub-westend-runtime --profile release --features=runtime-benchmarks"),
call("forklift cargo build -p kitchensink-runtime --profile production --features=runtime-benchmarks"),
call("forklift cargo build -p westend-runtime --profile production --features=runtime-benchmarks"),
call("forklift cargo build -p rococo-runtime --profile production --features=runtime-benchmarks"),
call("forklift cargo build -p asset-hub-westend-runtime --profile production --features=runtime-benchmarks"),
call(get_mock_bench_output(
runtime='kitchensink',
@@ -150,7 +150,7 @@ class TestCmd(unittest.TestCase):
def test_bench_command_normal_execution(self):
self.mock_parse_args.return_value = (argparse.Namespace(
command='bench',
command='bench-omni',
runtime=['westend'],
pallet=['pallet_balances', 'pallet_staking'],
fail_fast=True,
@@ -170,7 +170,7 @@ class TestCmd(unittest.TestCase):
expected_calls = [
# Build calls
call("forklift cargo build -p westend-runtime --profile release --features=runtime-benchmarks"),
call("forklift cargo build -p westend-runtime --profile production --features=runtime-benchmarks"),
# Westend runtime calls
call(get_mock_bench_output(
@@ -193,7 +193,7 @@ class TestCmd(unittest.TestCase):
def test_bench_command_normal_execution_xcm(self):
self.mock_parse_args.return_value = (argparse.Namespace(
command='bench',
command='bench-omni',
runtime=['westend'],
pallet=['pallet_xcm_benchmarks::generic'],
fail_fast=True,
@@ -213,7 +213,7 @@ class TestCmd(unittest.TestCase):
expected_calls = [
# Build calls
call("forklift cargo build -p westend-runtime --profile release --features=runtime-benchmarks"),
call("forklift cargo build -p westend-runtime --profile production --features=runtime-benchmarks"),
# Westend runtime calls
call(get_mock_bench_output(
@@ -229,7 +229,7 @@ class TestCmd(unittest.TestCase):
def test_bench_command_two_runtimes_two_pallets(self):
self.mock_parse_args.return_value = (argparse.Namespace(
command='bench',
command='bench-omni',
runtime=['westend', 'rococo'],
pallet=['pallet_balances', 'pallet_staking'],
fail_fast=True,
@@ -250,8 +250,8 @@ class TestCmd(unittest.TestCase):
expected_calls = [
# Build calls
call("forklift cargo build -p westend-runtime --profile release --features=runtime-benchmarks"),
call("forklift cargo build -p rococo-runtime --profile release --features=runtime-benchmarks"),
call("forklift cargo build -p westend-runtime --profile production --features=runtime-benchmarks"),
call("forklift cargo build -p rococo-runtime --profile production --features=runtime-benchmarks"),
# Westend runtime calls
call(get_mock_bench_output(
runtime='westend',
@@ -287,7 +287,7 @@ class TestCmd(unittest.TestCase):
def test_bench_command_one_dev_runtime(self):
self.mock_parse_args.return_value = (argparse.Namespace(
command='bench',
command='bench-omni',
runtime=['dev'],
pallet=['pallet_balances'],
fail_fast=True,
@@ -309,7 +309,7 @@ class TestCmd(unittest.TestCase):
expected_calls = [
# Build calls
call("forklift cargo build -p kitchensink-runtime --profile release --features=runtime-benchmarks"),
call("forklift cargo build -p kitchensink-runtime --profile production --features=runtime-benchmarks"),
# Kitchensink runtime calls
call(get_mock_bench_output(
runtime='kitchensink',
@@ -324,7 +324,7 @@ class TestCmd(unittest.TestCase):
def test_bench_command_one_cumulus_runtime(self):
self.mock_parse_args.return_value = (argparse.Namespace(
command='bench',
command='bench-omni',
runtime=['asset-hub-westend'],
pallet=['pallet_assets'],
fail_fast=True,
@@ -344,7 +344,7 @@ class TestCmd(unittest.TestCase):
expected_calls = [
# Build calls
call("forklift cargo build -p asset-hub-westend-runtime --profile release --features=runtime-benchmarks"),
call("forklift cargo build -p asset-hub-westend-runtime --profile production --features=runtime-benchmarks"),
# Asset-hub-westend runtime calls
call(get_mock_bench_output(
runtime='asset-hub-westend',
@@ -359,7 +359,7 @@ class TestCmd(unittest.TestCase):
def test_bench_command_one_cumulus_runtime_xcm(self):
self.mock_parse_args.return_value = (argparse.Namespace(
command='bench',
command='bench-omni',
runtime=['asset-hub-westend'],
pallet=['pallet_xcm_benchmarks::generic', 'pallet_assets'],
fail_fast=True,
@@ -379,7 +379,7 @@ class TestCmd(unittest.TestCase):
expected_calls = [
# Build calls
call("forklift cargo build -p asset-hub-westend-runtime --profile release --features=runtime-benchmarks"),
call("forklift cargo build -p asset-hub-westend-runtime --profile production --features=runtime-benchmarks"),
# Asset-hub-westend runtime calls
call(get_mock_bench_output(
runtime='asset-hub-westend',
@@ -227,7 +227,8 @@ jobs:
cat .github/env >> $GITHUB_OUTPUT
if [ -n "$IMAGE_OVERRIDE" ]; then
echo "IMAGE=$IMAGE_OVERRIDE" >> $GITHUB_OUTPUT
IMAGE=$IMAGE_OVERRIDE
echo "IMAGE=$IMAGE" >> $GITHUB_OUTPUT
fi
if [[ $BODY == "/cmd bench"* ]]; then
@@ -237,6 +238,10 @@ jobs:
else
echo "RUNNER=ubuntu-latest" >> $GITHUB_OUTPUT
fi
- name: Print outputs
run: |
echo "RUNNER=${{ steps.set-image.outputs.RUNNER }}"
echo "IMAGE=${{ steps.set-image.outputs.IMAGE }}"
# Get PR branch name, because the issue_comment event does not contain the PR branch name
get-pr-branch:
@@ -283,10 +288,16 @@ jobs:
env:
JOB_NAME: "cmd"
runs-on: ${{ needs.set-image.outputs.RUNNER }}
timeout-minutes: 4320 # 72 hours -> 3 days; as it could take a long time to run all the runtimes/pallets
container:
image: ${{ needs.set-image.outputs.IMAGE }}
timeout-minutes: 1440 # 24 hours per runtime
steps:
- name: Checkout
uses: actions/checkout@v4
with:
repository: ${{ needs.get-pr-branch.outputs.repo }}
ref: ${{ needs.get-pr-branch.outputs.pr-branch }}
- name: Get command
uses: actions-ecosystem/action-regex-match@v2
id: get-pr-comment
@@ -340,13 +351,7 @@ jobs:
repo: context.repo.repo,
body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has started 🚀 [See logs here](${job_url})`
})
- name: Checkout
uses: actions/checkout@v4
with:
repository: ${{ needs.get-pr-branch.outputs.repo }}
ref: ${{ needs.get-pr-branch.outputs.pr-branch }}
- name: Install dependencies for bench
if: startsWith(steps.get-pr-comment.outputs.group2, 'bench')
run: |
@@ -364,6 +369,7 @@ jobs:
# Fixes "detected dubious ownership" error in the ci
git config --global --add safe.directory '*'
git remote -v
cat /proc/cpuinfo
python3 -m pip install -r .github/scripts/generate-prdoc.requirements.txt
python3 .github/scripts/cmd/cmd.py $CMD $PR_ARG
git status
@@ -8,6 +8,8 @@
"bench_features": "runtime-benchmarks",
"bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic,pallet_nomination_pools,pallet_remark,pallet_transaction_storage",
"uri": null,
"old_package": "staging-node-cli",
"old_bin": "substrate-node",
"is_relay": false
},
{
@@ -19,6 +21,8 @@
"bench_flags": "",
"bench_features": "runtime-benchmarks",
"uri": "wss://try-runtime-westend.polkadot.io:443",
"old_package": "polkadot",
"old_bin": "polkadot",
"is_relay": true
},
{
@@ -27,9 +31,11 @@
"path": "polkadot/runtime/rococo",
"header": "polkadot/file_header.txt",
"template": "polkadot/xcm/pallet-xcm-benchmarks/template.hbs",
"uri": "wss://try-runtime-rococo.polkadot.io:443",
"bench_features": "runtime-benchmarks",
"bench_flags": "",
"uri": "wss://try-runtime-rococo.polkadot.io:443",
"old_package": "polkadot",
"old_bin": "polkadot",
"is_relay": true
},
{
@@ -41,6 +47,8 @@
"bench_features": "runtime-benchmarks",
"bench_flags": "",
"uri": "wss://westend-asset-hub-rpc.polkadot.io:443",
"old_package": "polkadot-parachain-bin",
"old_bin": "polkadot-parachain",
"is_relay": false
},
{
@@ -52,6 +60,8 @@
"bench_features": "runtime-benchmarks",
"bench_flags": "",
"uri": "wss://rococo-asset-hub-rpc.polkadot.io:443",
"old_package": "polkadot-parachain-bin",
"old_bin": "polkadot-parachain",
"is_relay": false
},
{
@@ -63,6 +73,8 @@
"bench_features": "runtime-benchmarks",
"bench_flags": "",
"uri": "wss://rococo-bridge-hub-rpc.polkadot.io:443",
"old_package": "polkadot-parachain-bin",
"old_bin": "polkadot-parachain",
"is_relay": false
},
{
@@ -74,6 +86,8 @@
"bench_features": "runtime-benchmarks",
"bench_flags": "",
"uri": "wss://westend-bridge-hub-rpc.polkadot.io:443",
"old_package": "polkadot-parachain-bin",
"old_bin": "polkadot-parachain",
"is_relay": false
},
{
@@ -84,7 +98,10 @@
"template": "cumulus/templates/xcm-bench-template.hbs",
"bench_features": "runtime-benchmarks",
"bench_flags": "",
"uri": "wss://westend-collectives-rpc.polkadot.io:443"
"uri": "wss://westend-collectives-rpc.polkadot.io:443",
"old_package": "polkadot-parachain-bin",
"old_bin": "polkadot-parachain",
"is_relay": false
},
{
"name": "contracts-rococo",
@@ -95,6 +112,8 @@
"bench_features": "runtime-benchmarks",
"bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm",
"uri": "wss://rococo-contracts-rpc.polkadot.io:443",
"old_package": "polkadot-parachain-bin",
"old_bin": "polkadot-parachain",
"is_relay": false
},
{
@@ -106,6 +125,8 @@
"bench_features": "runtime-benchmarks",
"bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic",
"uri": "wss://rococo-coretime-rpc.polkadot.io:443",
"old_package": "polkadot-parachain-bin",
"old_bin": "polkadot-parachain",
"is_relay": false
},
{
@@ -117,6 +138,8 @@
"bench_features": "runtime-benchmarks",
"bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic",
"uri": "wss://westend-coretime-rpc.polkadot.io:443",
"old_package": "polkadot-parachain-bin",
"old_bin": "polkadot-parachain",
"is_relay": false
},
{
@@ -128,6 +151,8 @@
"bench_features": "runtime-benchmarks",
"bench_flags": "--genesis-builder-policy=none",
"uri": null,
"old_package": "polkadot-parachain-bin",
"old_bin": "polkadot-parachain",
"is_relay": false
},
{
@@ -139,6 +164,8 @@
"bench_features": "runtime-benchmarks",
"bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic",
"uri": "wss://rococo-people-rpc.polkadot.io:443",
"old_package": "polkadot-parachain-bin",
"old_bin": "polkadot-parachain",
"is_relay": false
},
{
@@ -150,6 +177,8 @@
"bench_features": "runtime-benchmarks",
"bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic",
"uri": "wss://westend-people-rpc.polkadot.io:443",
"old_package": "polkadot-parachain-bin",
"old_bin": "polkadot-parachain",
"is_relay": false
}
]
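
The new `old_package`/`old_bin` fields feed the old-CLI path in cmd.py shown earlier: `old_package` is the crate that gets built, and `old_bin` is the binary that runs the machine test and the benchmarks. A minimal sketch of that lookup follows; the helper name is illustrative and the sample entry is trimmed from the westend row above.

def old_cli_commands(runtime, profile="production"):
    # `runtime` is one entry of the runtimes matrix above
    chain = runtime["name"] if runtime["name"] == "dev" else f"{runtime['name']}-dev"
    build = (f"forklift cargo build -p {runtime['old_package']} "
             f"--profile {profile} --features={runtime['bench_features']} --locked")
    machine_test = f"target/{profile}/{runtime['old_bin']} benchmark machine --chain={chain}"
    return build, machine_test

westend = {"name": "westend", "old_package": "polkadot", "old_bin": "polkadot",
           "bench_features": "runtime-benchmarks"}
print(old_cli_commands(westend))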
@@ -17,25 +17,27 @@
//! Autogenerated weights for `pallet_balances`
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
//! DATE: 2024-12-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! DATE: 2024-12-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
//! HOSTNAME: `95c137a642c3`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024
// Executed Command:
// target/production/polkadot
// benchmark
// pallet
// --steps=50
// --repeat=20
// --extrinsic=*
// --chain=westend-dev
// --pallet=pallet_balances
// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt
// --output=./polkadot/runtime/westend/src/weights
// --wasm-execution=compiled
// --steps=50
// --repeat=20
// --heap-pages=4096
// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
// --pallet=pallet_balances
// --chain=westend-dev
// --header=./polkadot/file_header.txt
// --output=./polkadot/runtime/westend/src/weights/
// --no-storage-info
// --no-min-squares
// --no-median-slopes
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
@@ -54,8 +56,8 @@ impl<T: frame_system::Config> pallet_balances::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `3593`
// Minimum execution time: 50_394_000 picoseconds.
Weight::from_parts(51_666_000, 0)
// Minimum execution time: 51_474_000 picoseconds.
Weight::from_parts(52_840_000, 0)
.saturating_add(Weight::from_parts(0, 3593))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(1))
@@ -66,8 +68,8 @@ impl<T: frame_system::Config> pallet_balances::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `3593`
// Minimum execution time: 40_307_000 picoseconds.
Weight::from_parts(41_722_000, 0)
// Minimum execution time: 39_875_000 picoseconds.
Weight::from_parts(41_408_000, 0)
.saturating_add(Weight::from_parts(0, 3593))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(1))
@@ -78,8 +80,8 @@ impl<T: frame_system::Config> pallet_balances::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `174`
// Estimated: `3593`
// Minimum execution time: 19_110_000 picoseconds.
Weight::from_parts(19_623_000, 0)
// Minimum execution time: 19_614_000 picoseconds.
Weight::from_parts(20_194_000, 0)
.saturating_add(Weight::from_parts(0, 3593))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(1))
@@ -90,8 +92,8 @@ impl<T: frame_system::Config> pallet_balances::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `174`
// Estimated: `3593`
// Minimum execution time: 26_837_000 picoseconds.
Weight::from_parts(27_672_000, 0)
// Minimum execution time: 27_430_000 picoseconds.
Weight::from_parts(28_151_000, 0)
.saturating_add(Weight::from_parts(0, 3593))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(1))
@@ -102,8 +104,8 @@ impl<T: frame_system::Config> pallet_balances::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `103`
// Estimated: `6196`
// Minimum execution time: 53_032_000 picoseconds.
Weight::from_parts(54_040_000, 0)
// Minimum execution time: 54_131_000 picoseconds.
Weight::from_parts(54_810_000, 0)
.saturating_add(Weight::from_parts(0, 6196))
.saturating_add(T::DbWeight::get().reads(2))
.saturating_add(T::DbWeight::get().writes(2))
@@ -114,8 +116,8 @@ impl<T: frame_system::Config> pallet_balances::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `3593`
// Minimum execution time: 49_429_000 picoseconds.
Weight::from_parts(50_020_000, 0)
// Minimum execution time: 48_692_000 picoseconds.
Weight::from_parts(51_416_000, 0)
.saturating_add(Weight::from_parts(0, 3593))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(1))
@@ -126,8 +128,8 @@ impl<T: frame_system::Config> pallet_balances::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `174`
// Estimated: `3593`
// Minimum execution time: 22_114_000 picoseconds.
Weight::from_parts(22_526_000, 0)
// Minimum execution time: 22_604_000 picoseconds.
Weight::from_parts(23_336_000, 0)
.saturating_add(Weight::from_parts(0, 3593))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(1))
@@ -139,11 +141,11 @@ impl<T: frame_system::Config> pallet_balances::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `0 + u * (136 ±0)`
// Estimated: `990 + u * (2603 ±0)`
// Minimum execution time: 17_550_000 picoseconds.
Weight::from_parts(17_860_000, 0)
// Minimum execution time: 18_118_000 picoseconds.
Weight::from_parts(18_352_000, 0)
.saturating_add(Weight::from_parts(0, 990))
// Standard Error: 11_891
.saturating_add(Weight::from_parts(15_027_705, 0).saturating_mul(u.into()))
// Standard Error: 14_688
.saturating_add(Weight::from_parts(15_412_440, 0).saturating_mul(u.into()))
.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into())))
.saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into())))
.saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into()))
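
As a worked example of the linear component above (my arithmetic, using the regenerated figures), the ref-time weight of upgrade_accounts grows as roughly base plus a per-account slope, on top of one DB read and write per account:

base_ps = 18_352_000         # Weight::from_parts base, in picoseconds
per_account_ps = 15_412_440  # slope added per upgraded account
u = 100
print(base_ps + per_account_ps * u)  # 1_559_596_000 ps of ref time, plus u reads and u writes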
@@ -152,24 +154,24 @@ impl<T: frame_system::Config> pallet_balances::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
// Minimum execution time: 6_605_000 picoseconds.
Weight::from_parts(6_922_000, 0)
// Minimum execution time: 6_779_000 picoseconds.
Weight::from_parts(7_246_000, 0)
.saturating_add(Weight::from_parts(0, 0))
}
fn burn_allow_death() -> Weight {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
// Minimum execution time: 31_182_000 picoseconds.
Weight::from_parts(32_104_000, 0)
// Minimum execution time: 30_935_000 picoseconds.
Weight::from_parts(32_251_000, 0)
.saturating_add(Weight::from_parts(0, 0))
}
fn burn_keep_alive() -> Weight {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
// Minimum execution time: 21_105_000 picoseconds.
Weight::from_parts(21_533_000, 0)
// Minimum execution time: 21_002_000 picoseconds.
Weight::from_parts(21_760_000, 0)
.saturating_add(Weight::from_parts(0, 0))
}
}