diff --git a/.github/scripts/check-prdoc.py b/.github/scripts/check-prdoc.py new file mode 100644 index 0000000000000000000000000000000000000000..42b063f2885da148033986dfa49740f2b0416460 --- /dev/null +++ b/.github/scripts/check-prdoc.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python3 + +''' +Ensure that the prdoc files are valid. + +# Example + +```sh +python3 -m pip install cargo-workspace +python3 .github/scripts/check-prdoc.py Cargo.toml prdoc/*.prdoc +``` + +Produces example output: +```pre +🔎 Reading workspace polkadot-sdk/Cargo.toml +📦 Checking 32 prdocs against 493 crates. +✅ All prdocs are valid +``` +''' + +import os +import yaml +import argparse +import cargo_workspace + +def check_prdoc_crate_names(root, paths): + ''' + Check that all crates in the `crates` section of each prdoc are present in the workspace. + ''' + + print(f'🔎 Reading workspace {root}.') + workspace = cargo_workspace.Workspace.from_path(root) + crate_names = [crate.name for crate in workspace.crates] + + print(f'📦 Checking {len(paths)} prdocs against {len(crate_names)} crates.') + faulty = {} + + for path in paths: + with open(path, 'r') as f: + prdoc = yaml.safe_load(f) + + for crate in prdoc.get('crates', []): + crate = crate['name'] + if crate in crate_names: + continue + + faulty.setdefault(path, []).append(crate) + + if len(faulty) == 0: + print('✅ All prdocs are valid.') + else: + print('❌ Some prdocs are invalid.') + for path, crates in faulty.items(): + print(f'💥 {path} lists invalid crates: {", ".join(crates)}') + exit(1) + +def parse_args(): + parser = argparse.ArgumentParser(description='Check prdoc files') + parser.add_argument('root', help='The cargo workspace manifest', metavar='root', type=str, nargs=1) + parser.add_argument('prdoc', help='The prdoc files', metavar='prdoc', type=str, nargs='*') + args = parser.parse_args() + + if len(args.prdoc) == 0: + print('❌ Need at least one prdoc file as argument.') + exit(1) + + return { 'root': os.path.abspath(args.root[0]), 'prdocs': args.prdoc } + +if __name__ == '__main__': + args = parse_args() + check_prdoc_crate_names(args['root'], args['prdocs']) diff --git a/.github/scripts/check-workspace.py b/.github/scripts/check-workspace.py index d200122fee9f7035dce8c811e7c24c003d9545a4..1f8f103e4e157a8c1c804a618652741193ca5a00 100644 --- a/.github/scripts/check-workspace.py +++ b/.github/scripts/check-workspace.py @@ -18,7 +18,7 @@ def parse_args(): parser.add_argument('workspace_dir', help='The directory to check', metavar='workspace_dir', type=str, nargs=1) parser.add_argument('--exclude', help='Exclude crate paths from the check', metavar='exclude', type=str, nargs='*', default=[]) - + args = parser.parse_args() return (args.workspace_dir[0], args.exclude) @@ -26,7 +26,7 @@ def main(root, exclude): workspace_crates = get_members(root, exclude) all_crates = get_crates(root, exclude) print(f'📦 Found {len(all_crates)} crates in total') - + check_duplicates(workspace_crates) check_missing(workspace_crates, all_crates) check_links(all_crates) @@ -48,14 +48,14 @@ def get_members(workspace_dir, exclude): if not 'members' in root_manifest['workspace']: return [] - + members = [] for member in root_manifest['workspace']['members']: if member in exclude: print(f'❌ Excluded member should not appear in the workspace {member}') sys.exit(1) members.append(member) - + return members # List all members of the workspace. 
@@ -74,12 +74,12 @@ def get_crates(workspace_dir, exclude_crates) -> dict: with open(path, "r") as f: content = f.read() manifest = toml.loads(content) - + if 'workspace' in manifest: if root != workspace_dir: print("⏩ Excluded recursive workspace at %s" % path) continue - + # Cut off the root path and the trailing /Cargo.toml. path = path[len(workspace_dir)+1:-11] name = manifest['package']['name'] @@ -87,7 +87,7 @@ def get_crates(workspace_dir, exclude_crates) -> dict: print("⏩ Excluded crate %s at %s" % (name, path)) continue crates[name] = (path, manifest) - + return crates # Check that there are no duplicate entries in the workspace. @@ -138,23 +138,23 @@ def check_links(all_crates): if not 'path' in deps[dep]: broken.append((name, dep_name, "crate must be linked via `path`")) return - + def check_crate(deps): to_checks = ['dependencies', 'dev-dependencies', 'build-dependencies'] for to_check in to_checks: if to_check in deps: check_deps(deps[to_check]) - + # There could possibly target dependant deps: if 'target' in manifest: # Target dependant deps can only have one level of nesting: for _, target in manifest['target'].items(): check_crate(target) - + check_crate(manifest) - + links.sort() broken.sort() diff --git a/.github/workflows/check-licenses.yml b/.github/workflows/check-licenses.yml index e1e92d288ceae235d23fa36c31d592092fe8b0ba..c32b6fcf89e06bb56cefc0517e1dcab1d1ef0f37 100644 --- a/.github/workflows/check-licenses.yml +++ b/.github/workflows/check-licenses.yml @@ -42,5 +42,4 @@ jobs: shopt -s globstar npx @paritytech/license-scanner scan \ --ensure-licenses ${{ env.LICENSES }} \ - --exclude ./substrate/bin/node-template \ -- ./substrate/**/*.rs diff --git a/.github/workflows/check-prdoc.yml b/.github/workflows/check-prdoc.yml index 91701ca036e0075236917a1c2c82079a27f904db..c31dee06ec54a0154efc3ad46ff24c79de4d0d7b 100644 --- a/.github/workflows/check-prdoc.yml +++ b/.github/workflows/check-prdoc.yml @@ -57,3 +57,11 @@ jobs: echo "Checking for PR#${GITHUB_PR}" echo "You can find more information about PRDoc at $PRDOC_DOC" $ENGINE run --rm -v $PWD:/repo -e RUST_LOG=info $IMAGE check -n ${GITHUB_PR} + + - name: Validate prdoc for PR#${{ github.event.pull_request.number }} + if: ${{ !contains(steps.get-labels.outputs.labels, 'R0') }} + run: | + echo "Validating PR#${GITHUB_PR}" + python3 --version + python3 -m pip install cargo-workspace==1.2.1 + python3 .github/scripts/check-prdoc.py Cargo.toml prdoc/pr_${GITHUB_PR}.prdoc diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml index 15b4869997be186c71cecbc89060b7591d71ffa3..f8de6135572565d9d16465e68aa3f0bace915cc5 100644 --- a/.gitlab/pipeline/build.yml +++ b/.gitlab/pipeline/build.yml @@ -91,7 +91,7 @@ build-rustdoc: - .run-immediately variables: SKIP_WASM_BUILD: 1 - RUSTDOCFLAGS: "" + RUSTDOCFLAGS: "--default-theme=ayu --html-in-header ./docs/sdk/headers/header.html --extend-css ./docs/sdk/headers/theme.css" artifacts: name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}-doc" when: on_success @@ -337,7 +337,7 @@ build-runtimes-polkavm: - .common-refs - .run-immediately script: - - SUBSTRATE_RUNTIME_TARGET=riscv cargo check -p minimal-runtime + - SUBSTRATE_RUNTIME_TARGET=riscv cargo check -p minimal-template-runtime - SUBSTRATE_RUNTIME_TARGET=riscv cargo check -p westend-runtime - SUBSTRATE_RUNTIME_TARGET=riscv cargo check -p rococo-runtime - SUBSTRATE_RUNTIME_TARGET=riscv cargo check -p polkadot-test-runtime diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index 
b5e26d194896aad7839dce34460409fe9ceaa045..5c41a3e6e08fae521c41a66735ac54df51e39057 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -25,6 +25,7 @@ test-linux-stable: # "upgrade_version_checks_should_work" is currently failing - | time cargo nextest run \ + --filter-expr 'not deps(/polkadot-subsystem-bench/)' \ --workspace \ --locked \ --release \ @@ -48,6 +49,7 @@ test-linux-stable: - target/nextest/default/junit.xml reports: junit: target/nextest/default/junit.xml + timeout: 90m test-linux-oldkernel-stable: extends: test-linux-stable @@ -68,7 +70,7 @@ test-linux-stable-runtime-benchmarks: # but still want to have debug assertions. RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" script: - - time cargo nextest run --workspace --features runtime-benchmarks benchmark --locked --cargo-profile testnet + - time cargo nextest run --filter-expr 'not deps(/polkadot-subsystem-bench/)' --workspace --features runtime-benchmarks benchmark --locked --cargo-profile testnet # can be used to run all tests # test-linux-stable-all: diff --git a/Cargo.lock b/Cargo.lock index ea2f27202fb68b4755869a36c1d085aabc6b7ceb..fa85dd3d8de9be58450b8413f67760a1b09f5dce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -322,6 +322,20 @@ dependencies = [ "num-traits", ] +[[package]] +name = "aquamarine" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1da02abba9f9063d786eab1509833ebb2fac0f966862ca59439c76b9c566760" +dependencies = [ + "include_dir", + "itertools 0.10.5", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "aquamarine" version = "0.5.0" @@ -4060,12 +4074,14 @@ dependencies = [ "cumulus-primitives-core", "cumulus-primitives-proof-size-hostfunction", "cumulus-test-runtime", - "docify", + "docify 0.2.7", + "frame-benchmarking", "frame-support", "frame-system", "log", "parity-scale-codec", "scale-info", + "sp-core", "sp-io", "sp-runtime", "sp-std 14.0.0", @@ -4171,6 +4187,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-overseer", "polkadot-primitives", + "polkadot-service", "sc-authority-discovery", "sc-client-api", "sc-network", @@ -4740,13 +4757,39 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" +[[package]] +name = "docify" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af1b04e6ef3d21119d3eb7b032bca17f99fe041e9c072f30f32cc0e1a2b1f3c4" +dependencies = [ + "docify_macros 0.1.16", +] + [[package]] name = "docify" version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7cc4fd38aaa9fb98ac70794c82a00360d1e165a87fbf96a8a91f9dfc602aaee2" dependencies = [ - "docify_macros", + "docify_macros 0.2.7", +] + +[[package]] +name = "docify_macros" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b5610df7f2acf89a1bb5d1a66ae56b1c7fcdcfe3948856fb3ace3f644d70eb7" +dependencies = [ + "common-path", + "derive-syn-parse", + "lazy_static", + "proc-macro2", + "quote", + "regex", + "syn 2.0.50", + "termcolor", + "walkdir", ] [[package]] @@ -5455,7 +5498,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" name = "frame" version = "0.0.1-dev" dependencies = [ - "docify", + "docify 0.2.7", "frame-executive", "frame-support", "frame-system", @@ -5623,6 +5666,7 @@ dependencies = [ name = "frame-executive" version = "28.0.0" dependencies = 
[ + "aquamarine 0.3.3", "array-bytes 6.1.0", "frame-support", "frame-system", @@ -5679,11 +5723,11 @@ dependencies = [ name = "frame-support" version = "28.0.0" dependencies = [ - "aquamarine", + "aquamarine 0.5.0", "array-bytes 6.1.0", "assert_matches", "bitflags 1.3.2", - "docify", + "docify 0.2.7", "environmental", "frame-metadata", "frame-support-procedural", @@ -5713,6 +5757,7 @@ dependencies = [ "sp-staking", "sp-state-machine", "sp-std 14.0.0", + "sp-timestamp", "sp-tracing 16.0.0", "sp-weights", "static_assertions", @@ -5826,7 +5871,7 @@ version = "28.0.0" dependencies = [ "cfg-if", "criterion 0.4.0", - "docify", + "docify 0.2.7", "frame-support", "log", "parity-scale-codec", @@ -5900,7 +5945,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29f9df8a11882c4e3335eb2d18a0137c505d9ca927470b0cac9c6f0ae07d28f7" dependencies = [ - "rustix 0.38.21", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -6789,7 +6834,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi 0.3.2", - "rustix 0.38.21", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -7085,6 +7130,7 @@ dependencies = [ "pallet-lottery", "pallet-membership", "pallet-message-queue", + "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", @@ -7795,9 +7841,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" +checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829" [[package]] name = "lioness" @@ -8131,15 +8177,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] -name = "minimal-node" -version = "4.0.0-dev" +name = "minimal-template-node" +version = "0.0.0" dependencies = [ "clap 4.5.1", "frame", "futures", "futures-timer", "jsonrpsee", - "minimal-runtime", + "minimal-template-runtime", "sc-basic-authorship", "sc-cli", "sc-client-api", @@ -8166,12 +8212,12 @@ dependencies = [ ] [[package]] -name = "minimal-runtime" -version = "0.1.0" +name = "minimal-template-runtime" +version = "0.0.0" dependencies = [ "frame", - "frame-support", "pallet-balances", + "pallet-minimal-template", "pallet-sudo", "pallet-timestamp", "pallet-transaction-payment", @@ -8193,9 +8239,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.8" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "wasi 0.11.0+wasi-snapshot-preview1", @@ -8685,50 +8731,6 @@ dependencies = [ "kitchensink-runtime", ] -[[package]] -name = "node-template" -version = "4.0.0-dev" -dependencies = [ - "clap 4.5.1", - "frame-benchmarking", - "frame-benchmarking-cli", - "frame-system", - "futures", - "jsonrpsee", - "node-template-runtime", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc", - "sc-basic-authorship", - "sc-cli", - "sc-client-api", - "sc-consensus", - "sc-consensus-aura", - "sc-consensus-grandpa", - "sc-executor", - "sc-network", - "sc-offchain", - "sc-rpc-api", - "sc-service", - 
"sc-telemetry", - "sc-transaction-pool", - "sc-transaction-pool-api", - "serde_json", - "sp-api", - "sp-block-builder", - "sp-blockchain", - "sp-consensus-aura", - "sp-consensus-grandpa", - "sp-core", - "sp-inherents", - "sp-io", - "sp-keyring", - "sp-runtime", - "sp-timestamp", - "substrate-build-script-utils", - "substrate-frame-rpc-system", - "try-runtime-cli", -] - [[package]] name = "node-template-release" version = "3.0.0" @@ -8743,45 +8745,6 @@ dependencies = [ "toml_edit 0.19.15", ] -[[package]] -name = "node-template-runtime" -version = "4.0.0-dev" -dependencies = [ - "frame-benchmarking", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", - "pallet-aura", - "pallet-balances", - "pallet-grandpa", - "pallet-sudo", - "pallet-template", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "parity-scale-codec", - "scale-info", - "serde_json", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-consensus-grandpa", - "sp-core", - "sp-genesis-builder", - "sp-inherents", - "sp-offchain", - "sp-runtime", - "sp-session", - "sp-std 14.0.0", - "sp-storage 19.0.0", - "sp-transaction-pool", - "sp-version", - "substrate-wasm-builder", -] - [[package]] name = "node-testing" version = "3.0.0-dev" @@ -9138,6 +9101,7 @@ dependencies = [ name = "pallet-asset-conversion-tx-payment" version = "10.0.0" dependencies = [ + "frame-benchmarking", "frame-support", "frame-system", "pallet-asset-conversion", @@ -9305,8 +9269,8 @@ dependencies = [ name = "pallet-bags-list" version = "27.0.0" dependencies = [ - "aquamarine", - "docify", + "aquamarine 0.5.0", + "docify 0.2.7", "frame-benchmarking", "frame-election-provider-support", "frame-support", @@ -9354,7 +9318,7 @@ dependencies = [ name = "pallet-balances" version = "28.0.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -9969,6 +9933,26 @@ dependencies = [ "sp-std 14.0.0", ] +[[package]] +name = "pallet-example-single-block-migrations" +version = "0.0.1" +dependencies = [ + "docify 0.2.7", + "frame-executive", + "frame-support", + "frame-system", + "frame-try-runtime", + "log", + "pallet-balances", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std 14.0.0", + "sp-version", +] + [[package]] name = "pallet-example-split" version = "10.0.0" @@ -10010,6 +9994,7 @@ dependencies = [ "pallet-example-frame-crate", "pallet-example-kitchensink", "pallet-example-offchain-worker", + "pallet-example-single-block-migrations", "pallet-example-split", "pallet-example-tasks", ] @@ -10018,7 +10003,7 @@ dependencies = [ name = "pallet-fast-unstake" version = "27.0.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-election-provider-support", "frame-support", @@ -10215,6 +10200,39 @@ dependencies = [ "sp-weights", ] +[[package]] +name = "pallet-migrations" +version = "1.0.0" +dependencies = [ + "docify 0.1.16", + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "pretty_assertions", + "scale-info", + "sp-api", + "sp-block-builder", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std 14.0.0", + "sp-tracing 16.0.0", + "sp-version", +] + +[[package]] +name = "pallet-minimal-template" +version = "0.0.0" +dependencies = [ + "frame", + "parity-scale-codec", + "scale-info", +] + [[package]] name = "pallet-mixnet" 
version = "0.4.0" @@ -10490,7 +10508,7 @@ dependencies = [ name = "pallet-paged-list" version = "0.6.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -10516,14 +10534,13 @@ dependencies = [ [[package]] name = "pallet-parachain-template" -version = "0.7.0" +version = "0.0.0" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "parity-scale-codec", "scale-info", - "serde", "sp-core", "sp-io", "sp-runtime", @@ -10533,7 +10550,7 @@ dependencies = [ name = "pallet-parameters" version = "0.0.1" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -10694,7 +10711,7 @@ dependencies = [ name = "pallet-safe-mode" version = "9.0.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -10751,7 +10768,7 @@ dependencies = [ name = "pallet-scheduler" version = "29.0.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -10964,7 +10981,7 @@ dependencies = [ name = "pallet-sudo" version = "28.0.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -10978,7 +10995,7 @@ dependencies = [ [[package]] name = "pallet-template" -version = "4.0.0-dev" +version = "0.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -10988,14 +11005,13 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] name = "pallet-timestamp" version = "27.0.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -11035,6 +11051,7 @@ dependencies = [ name = "pallet-transaction-payment" version = "28.0.0" dependencies = [ + "frame-benchmarking", "frame-support", "frame-system", "pallet-balances", @@ -11099,7 +11116,7 @@ dependencies = [ name = "pallet-treasury" version = "27.0.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -11119,7 +11136,7 @@ dependencies = [ name = "pallet-tx-pause" version = "9.0.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -11301,7 +11318,7 @@ dependencies = [ [[package]] name = "parachain-template-node" -version = "0.1.0" +version = "0.0.0" dependencies = [ "clap 4.5.1", "color-print", @@ -11359,7 +11376,7 @@ dependencies = [ [[package]] name = "parachain-template-runtime" -version = "0.7.0" +version = "0.0.0" dependencies = [ "cumulus-pallet-aura-ext", "cumulus-pallet-parachain-system", @@ -12186,6 +12203,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "polkadot-subsystem-bench", "rand", "sc-network", "schnellru", @@ -12217,6 +12235,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "polkadot-subsystem-bench", "rand", "sc-network", "schnellru", @@ -12671,6 +12690,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "rstest", "sc-keystore", "sp-application-crypto", "sp-core", @@ -12694,6 +12714,8 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "rstest", + "schnellru", "sp-application-crypto", "sp-keystore", "thiserror", @@ -13387,13 +13409,27 @@ version = "0.0.1" dependencies = [ "cumulus-pallet-aura-ext", "cumulus-pallet-parachain-system", - "docify", + "docify 
0.2.7", "frame", + "frame-executive", + "frame-support", + "frame-system", "kitchensink-runtime", "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-collective", "pallet-default-config-example", + "pallet-democracy", + "pallet-example-offchain-worker", + "pallet-example-single-block-migrations", "pallet-examples", + "pallet-multisig", + "pallet-proxy", + "pallet-scheduler", "pallet-timestamp", + "pallet-transaction-payment", + "pallet-utility", "parity-scale-codec", "sc-cli", "sc-client-db", @@ -13412,7 +13448,9 @@ dependencies = [ "sp-core", "sp-io", "sp-keyring", + "sp-offchain", "sp-runtime", + "sp-version", "staging-chain-spec-builder", "staging-node-cli", "staging-parachain-info", @@ -13427,6 +13465,7 @@ version = "7.0.0" dependencies = [ "assert_matches", "async-trait", + "bitvec", "env_logger 0.9.3", "frame-benchmarking", "frame-benchmarking-cli", @@ -14227,7 +14266,7 @@ dependencies = [ "hex", "lazy_static", "procfs-core", - "rustix 0.38.21", + "rustix 0.38.25", ] [[package]] @@ -15352,14 +15391,14 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.21" +version = "0.38.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" +checksum = "dc99bc2d4f1fed22595588a013687477aedf3cdcfb26558c559edb67b4d9b22e" dependencies = [ "bitflags 2.4.0", "errno", "libc", - "linux-raw-sys 0.4.10", + "linux-raw-sys 0.4.11", "windows-sys 0.48.0", ] @@ -15632,7 +15671,7 @@ name = "sc-chain-spec" version = "27.0.0" dependencies = [ "array-bytes 6.1.0", - "docify", + "docify 0.2.7", "log", "memmap2 0.9.3", "parity-scale-codec", @@ -16635,7 +16674,6 @@ dependencies = [ "hyper", "jsonrpsee", "log", - "pin-project", "serde_json", "substrate-prometheus-endpoint", "tokio", @@ -16734,6 +16772,7 @@ dependencies = [ "sc-transaction-pool", "sc-transaction-pool-api", "sc-utils", + "schnellru", "serde", "serde_json", "sp-api", @@ -17822,7 +17861,7 @@ dependencies = [ [[package]] name = "snowbridge-beacon-primitives" -version = "0.0.0" +version = "0.2.0" dependencies = [ "byte-slice-cast", "frame-support", @@ -17846,7 +17885,7 @@ dependencies = [ [[package]] name = "snowbridge-core" -version = "0.0.0" +version = "0.2.0" dependencies = [ "ethabi-decode", "frame-support", @@ -17869,7 +17908,7 @@ dependencies = [ [[package]] name = "snowbridge-ethereum" -version = "0.1.0" +version = "0.3.0" dependencies = [ "ethabi-decode", "ethbloom", @@ -17908,7 +17947,7 @@ dependencies = [ [[package]] name = "snowbridge-outbound-queue-merkle-tree" -version = "0.1.1" +version = "0.3.0" dependencies = [ "array-bytes 4.2.0", "env_logger 0.9.3", @@ -17923,7 +17962,7 @@ dependencies = [ [[package]] name = "snowbridge-outbound-queue-runtime-api" -version = "0.0.0" +version = "0.2.0" dependencies = [ "frame-support", "parity-scale-codec", @@ -17937,7 +17976,7 @@ dependencies = [ [[package]] name = "snowbridge-pallet-ethereum-client" -version = "0.0.0" +version = "0.2.0" dependencies = [ "bp-runtime", "byte-slice-cast", @@ -17983,7 +18022,7 @@ dependencies = [ [[package]] name = "snowbridge-pallet-inbound-queue" -version = "0.0.0" +version = "0.2.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -18016,7 +18055,7 @@ dependencies = [ [[package]] name = "snowbridge-pallet-inbound-queue-fixtures" -version = "0.9.0" +version = "0.10.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -18030,7 +18069,7 @@ dependencies = [ [[package]] name = "snowbridge-pallet-outbound-queue" -version = "0.0.0" +version = 
"0.2.0" dependencies = [ "bridge-hub-common", "ethabi-decode", @@ -18055,7 +18094,7 @@ dependencies = [ [[package]] name = "snowbridge-pallet-system" -version = "0.0.0" +version = "0.2.0" dependencies = [ "ethabi-decode", "frame-benchmarking", @@ -18083,7 +18122,7 @@ dependencies = [ [[package]] name = "snowbridge-router-primitives" -version = "0.0.0" +version = "0.9.0" dependencies = [ "ethabi-decode", "frame-support", @@ -18106,7 +18145,7 @@ dependencies = [ [[package]] name = "snowbridge-runtime-common" -version = "0.0.0" +version = "0.2.0" dependencies = [ "frame-support", "frame-system", @@ -18122,7 +18161,7 @@ dependencies = [ [[package]] name = "snowbridge-runtime-test-common" -version = "0.0.0" +version = "0.2.0" dependencies = [ "assets-common", "bridge-hub-test-utils", @@ -18199,7 +18238,7 @@ dependencies = [ [[package]] name = "snowbridge-system-runtime-api" -version = "0.0.0" +version = "0.2.0" dependencies = [ "parity-scale-codec", "snowbridge-core", @@ -18246,6 +18285,87 @@ dependencies = [ "sha-1 0.9.8", ] +[[package]] +name = "solochain-template-node" +version = "0.0.0" +dependencies = [ + "clap 4.5.1", + "frame-benchmarking-cli", + "frame-system", + "futures", + "jsonrpsee", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc", + "sc-basic-authorship", + "sc-cli", + "sc-client-api", + "sc-consensus", + "sc-consensus-aura", + "sc-consensus-grandpa", + "sc-executor", + "sc-network", + "sc-offchain", + "sc-rpc-api", + "sc-service", + "sc-telemetry", + "sc-transaction-pool", + "sc-transaction-pool-api", + "serde_json", + "solochain-template-runtime", + "sp-api", + "sp-block-builder", + "sp-blockchain", + "sp-consensus-aura", + "sp-consensus-grandpa", + "sp-core", + "sp-inherents", + "sp-io", + "sp-keyring", + "sp-runtime", + "sp-timestamp", + "substrate-build-script-utils", + "substrate-frame-rpc-system", + "try-runtime-cli", +] + +[[package]] +name = "solochain-template-runtime" +version = "0.0.0" +dependencies = [ + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "pallet-aura", + "pallet-balances", + "pallet-grandpa", + "pallet-sudo", + "pallet-template", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-block-builder", + "sp-consensus-aura", + "sp-consensus-grandpa", + "sp-core", + "sp-genesis-builder", + "sp-inherents", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std 14.0.0", + "sp-storage 19.0.0", + "sp-transaction-pool", + "sp-version", + "substrate-wasm-builder", +] + [[package]] name = "sp-api" version = "26.0.0" @@ -18614,7 +18734,7 @@ dependencies = [ [[package]] name = "sp-crypto-ec-utils" version = "0.4.1" -source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" +source = "git+https://github.com/paritytech/polkadot-sdk#838a534da874cf6071fba1df07643c6c5b033ae0" dependencies = [ "ark-bls12-377", "ark-bls12-377-ext", @@ -18897,7 +19017,7 @@ dependencies = [ name = "sp-runtime" version = "31.0.1" dependencies = [ - "docify", + "docify 0.2.7", "either", "hash256-std-hasher", "impl-trait-for-tuples", @@ -18919,6 +19039,7 @@ dependencies = [ "sp-tracing 16.0.0", "sp-weights", "substrate-test-runtime-client", + "tuplex", "zstd 0.12.4", ] @@ -18967,7 +19088,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "11.0.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" +source = "git+https://github.com/paritytech/polkadot-sdk#838a534da874cf6071fba1df07643c6c5b033ae0" dependencies = [ "Inflector", "proc-macro-crate 1.3.1", @@ -20186,7 +20307,7 @@ dependencies = [ "cfg-if", "fastrand 2.0.0", "redox_syscall 0.4.1", - "rustix 0.38.21", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -20205,7 +20326,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" dependencies = [ - "rustix 0.38.21", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -21051,6 +21172,12 @@ dependencies = [ "utf-8", ] +[[package]] +name = "tuplex" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "676ac81d5454c4dcf37955d34fa8626ede3490f744b86ca14a7b90168d2a08aa" + [[package]] name = "twox-hash" version = "1.6.3" diff --git a/Cargo.toml b/Cargo.toml index 1d27cfe95392b32a4d867595ace4733beeb2ff69..a2e22e0b9271da67f03f2fb9ac2835d489452739 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,6 +3,7 @@ authors = ["Parity Technologies "] edition = "2021" repository = "https://github.com/paritytech/polkadot-sdk.git" license = "GPL-3.0-only" +homepage = "https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/index.html" [workspace] resolver = "2" @@ -74,9 +75,6 @@ members = [ "cumulus/pallets/solo-to-para", "cumulus/pallets/xcm", "cumulus/pallets/xcmp-queue", - "cumulus/parachain-template/node", - "cumulus/parachain-template/pallets/template", - "cumulus/parachain-template/runtime", "cumulus/parachains/common", "cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo", "cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend", @@ -219,11 +217,6 @@ members = [ "polkadot/xcm/xcm-simulator", "polkadot/xcm/xcm-simulator/example", "polkadot/xcm/xcm-simulator/fuzzer", - "substrate/bin/minimal/node", - "substrate/bin/minimal/runtime", - "substrate/bin/node-template/node", - "substrate/bin/node-template/pallets/template", - "substrate/bin/node-template/runtime", "substrate/bin/node/bench", "substrate/bin/node/cli", "substrate/bin/node/inspect", @@ -337,6 +330,7 @@ members = [ "substrate/frame/examples/frame-crate", "substrate/frame/examples/kitchensink", "substrate/frame/examples/offchain-worker", + "substrate/frame/examples/single-block-migrations", "substrate/frame/examples/split", "substrate/frame/examples/tasks", "substrate/frame/executive", @@ -351,6 +345,7 @@ members = [ "substrate/frame/membership", "substrate/frame/merkle-mountain-range", "substrate/frame/message-queue", + "substrate/frame/migrations", "substrate/frame/mixnet", "substrate/frame/multisig", "substrate/frame/nft-fractionalization", @@ -502,6 +497,18 @@ members = [ "substrate/utils/frame/try-runtime/cli", "substrate/utils/prometheus", "substrate/utils/wasm-builder", + + "templates/minimal/node", + "templates/minimal/pallets/template", + "templates/minimal/runtime", + + "templates/solochain/node", + "templates/solochain/pallets/template", + "templates/solochain/runtime", + + "templates/parachain/node", + "templates/parachain/pallets/template", + "templates/parachain/runtime", ] default-members = ["polkadot", "substrate/bin/node/cli"] diff --git a/bridges/bin/runtime-common/Cargo.toml b/bridges/bin/runtime-common/Cargo.toml index fac88b20ca57901d4116e147bd9363a41ff35e36..ee54f15f38b4c0a2e9faae3a157874898701ba2a 100644 --- 
a/bridges/bin/runtime-common/Cargo.toml +++ b/bridges/bin/runtime-common/Cargo.toml @@ -93,6 +93,7 @@ runtime-benchmarks = [ "pallet-bridge-messages/runtime-benchmarks", "pallet-bridge-parachains/runtime-benchmarks", "pallet-bridge-relayers/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", diff --git a/bridges/bin/runtime-common/src/lib.rs b/bridges/bin/runtime-common/src/lib.rs index 2722f6f1c6d14f09ab215f8f020f2c449eda4d4b..035077408c40bd42c9e941c19af868989d66e362 100644 --- a/bridges/bin/runtime-common/src/lib.rs +++ b/bridges/bin/runtime-common/src/lib.rs @@ -105,43 +105,48 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages { ($call:ty, $account_id:ty, $($filter_call:ty),*) => { #[derive(Clone, codec::Decode, Default, codec::Encode, Eq, PartialEq, sp_runtime::RuntimeDebug, scale_info::TypeInfo)] pub struct BridgeRejectObsoleteHeadersAndMessages; - impl sp_runtime::traits::SignedExtension for BridgeRejectObsoleteHeadersAndMessages { + impl sp_runtime::traits::TransactionExtensionBase for BridgeRejectObsoleteHeadersAndMessages { const IDENTIFIER: &'static str = "BridgeRejectObsoleteHeadersAndMessages"; - type AccountId = $account_id; - type Call = $call; - type AdditionalSigned = (); + type Implicit = (); + } + impl sp_runtime::traits::TransactionExtension<$call, Context> for BridgeRejectObsoleteHeadersAndMessages { type Pre = (); - - fn additional_signed(&self) -> sp_std::result::Result< - (), - sp_runtime::transaction_validity::TransactionValidityError, - > { - Ok(()) - } + type Val = (); fn validate( &self, - _who: &Self::AccountId, - call: &Self::Call, - _info: &sp_runtime::traits::DispatchInfoOf, + origin: <$call as sp_runtime::traits::Dispatchable>::RuntimeOrigin, + call: &$call, + _info: &sp_runtime::traits::DispatchInfoOf<$call>, _len: usize, - ) -> sp_runtime::transaction_validity::TransactionValidity { - let valid = sp_runtime::transaction_validity::ValidTransaction::default(); + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl codec::Encode, + ) -> Result< + ( + sp_runtime::transaction_validity::ValidTransaction, + Self::Val, + <$call as sp_runtime::traits::Dispatchable>::RuntimeOrigin, + ), sp_runtime::transaction_validity::TransactionValidityError + > { + let tx_validity = sp_runtime::transaction_validity::ValidTransaction::default(); $( - let valid = valid - .combine_with(<$filter_call as $crate::BridgeRuntimeFilterCall<$call>>::validate(call)?); + let call_filter_validity = <$filter_call as $crate::BridgeRuntimeFilterCall<$call>>::validate(call)?; + let tx_validity = tx_validity.combine_with(call_filter_validity); )* - Ok(valid) + Ok((tx_validity, (), origin)) } - fn pre_dispatch( + fn prepare( self, - who: &Self::AccountId, - call: &Self::Call, - info: &sp_runtime::traits::DispatchInfoOf, - len: usize, + _val: Self::Val, + _origin: &<$call as sp_runtime::traits::Dispatchable>::RuntimeOrigin, + _call: &$call, + _info: &sp_runtime::traits::DispatchInfoOf<$call>, + _len: usize, + _context: &Context, ) -> Result { - self.validate(who, call, info, len).map(drop) + Ok(()) } } }; @@ -150,12 +155,14 @@ macro_rules! 
generate_bridge_reject_obsolete_headers_and_messages { #[cfg(test)] mod tests { use crate::BridgeRuntimeFilterCall; - use frame_support::{assert_err, assert_ok}; + use codec::Encode; + use frame_support::assert_err; use sp_runtime::{ - traits::SignedExtension, + traits::DispatchTransaction, transaction_validity::{InvalidTransaction, TransactionValidity, ValidTransaction}, }; + #[derive(Encode)] pub struct MockCall { data: u32, } @@ -206,17 +213,20 @@ mod tests { ); assert_err!( - BridgeRejectObsoleteHeadersAndMessages.validate(&(), &MockCall { data: 1 }, &(), 0), + BridgeRejectObsoleteHeadersAndMessages.validate_only((), &MockCall { data: 1 }, &(), 0), InvalidTransaction::Custom(1) ); assert_err!( - BridgeRejectObsoleteHeadersAndMessages.validate(&(), &MockCall { data: 2 }, &(), 0), + BridgeRejectObsoleteHeadersAndMessages.validate_only((), &MockCall { data: 2 }, &(), 0), InvalidTransaction::Custom(2) ); - assert_ok!( - BridgeRejectObsoleteHeadersAndMessages.validate(&(), &MockCall { data: 3 }, &(), 0), + assert_eq!( + BridgeRejectObsoleteHeadersAndMessages + .validate_only((), &MockCall { data: 3 }, &(), 0) + .unwrap() + .0, ValidTransaction { priority: 3, ..Default::default() } ) } diff --git a/bridges/bin/runtime-common/src/mock.rs b/bridges/bin/runtime-common/src/mock.rs index 8877a4fd95ce33150824b78674f38860616cf820..f147f1404f06fb60c608d4ad049502c6e184a85f 100644 --- a/bridges/bin/runtime-common/src/mock.rs +++ b/bridges/bin/runtime-common/src/mock.rs @@ -164,6 +164,7 @@ impl pallet_balances::Config for TestRuntime { type AccountStore = System; } +#[derive_impl(pallet_transaction_payment::config_preludes::TestDefaultConfig as pallet_transaction_payment::DefaultConfig)] impl pallet_transaction_payment::Config for TestRuntime { type OnChargeTransaction = pallet_transaction_payment::CurrencyAdapter; type OperationalFeeMultiplier = ConstU8<5>; @@ -176,7 +177,6 @@ impl pallet_transaction_payment::Config for TestRuntime { MinimumMultiplier, MaximumMultiplier, >; - type RuntimeEvent = RuntimeEvent; } impl pallet_bridge_grandpa::Config for TestRuntime { diff --git a/bridges/bin/runtime-common/src/priority_calculator.rs b/bridges/bin/runtime-common/src/priority_calculator.rs index a597fb9e2f49289360acfd7ee305b44eb7874a3e..0c53018330ea0ebc2fbacb32808e01a9ec88960f 100644 --- a/bridges/bin/runtime-common/src/priority_calculator.rs +++ b/bridges/bin/runtime-common/src/priority_calculator.rs @@ -169,12 +169,15 @@ mod integrity_tests { // nodes to the proof (x0.5 because we expect some nodes to be reused) let estimated_message_size = 512; // let's say all our messages have the same dispatch weight - let estimated_message_dispatch_weight = - Runtime::WeightInfo::message_dispatch_weight(estimated_message_size); + let estimated_message_dispatch_weight = >::WeightInfo::message_dispatch_weight( + estimated_message_size + ); // messages proof argument size is (for every message) messages size + some additional // trie nodes. 
Some of them are reused by different messages, so let's take 2/3 of default // "overhead" constant - let messages_proof_size = Runtime::WeightInfo::expected_extra_storage_proof_size() + let messages_proof_size = >::WeightInfo::expected_extra_storage_proof_size() .saturating_mul(2) .saturating_div(3) .saturating_add(estimated_message_size) @@ -182,7 +185,7 @@ mod integrity_tests { // finally we are able to estimate transaction size and weight let transaction_size = base_tx_size.saturating_add(messages_proof_size); - let transaction_weight = Runtime::WeightInfo::receive_messages_proof_weight( + let transaction_weight = >::WeightInfo::receive_messages_proof_weight( &PreComputedSize(transaction_size as _), messages as _, estimated_message_dispatch_weight.saturating_mul(messages), diff --git a/bridges/bin/runtime-common/src/refund_relayer_extension.rs b/bridges/bin/runtime-common/src/refund_relayer_extension.rs index bfcb82ad166c3a2a60891c1e18f2ad22e085cb1b..b912f8445ac1b455e95110d41347ac8e55afaa7c 100644 --- a/bridges/bin/runtime-common/src/refund_relayer_extension.rs +++ b/bridges/bin/runtime-common/src/refund_relayer_extension.rs @@ -48,9 +48,12 @@ use pallet_transaction_payment::{Config as TransactionPaymentConfig, OnChargeTra use pallet_utility::{Call as UtilityCall, Config as UtilityConfig, Pallet as UtilityPallet}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, Get, PostDispatchInfoOf, SignedExtension, Zero}, + traits::{ + AsSystemOriginSigner, DispatchInfoOf, Dispatchable, Get, PostDispatchInfoOf, + TransactionExtension, TransactionExtensionBase, ValidateResult, Zero, + }, transaction_validity::{ - TransactionPriority, TransactionValidity, TransactionValidityError, ValidTransactionBuilder, + InvalidTransaction, TransactionPriority, TransactionValidityError, ValidTransactionBuilder, }, DispatchResult, FixedPointOperand, RuntimeDebug, }; @@ -239,8 +242,8 @@ pub enum RelayerAccountAction { Slash(AccountId, RewardsAccountParams), } -/// Everything common among our refund signed extensions. -pub trait RefundSignedExtension: +/// Everything common among our refund transaction extensions. +pub trait RefundTransactionExtension: 'static + Clone + Codec + sp_std::fmt::Debug + Default + Eq + PartialEq + Send + Sync + TypeInfo where >::BridgedChain: @@ -456,8 +459,8 @@ where } } -/// Adapter that allow implementing `sp_runtime::traits::SignedExtension` for any -/// `RefundSignedExtension`. +/// Adapter that allow implementing `sp_runtime::traits::TransactionExtension` for any +/// `RefundTransactionExtension`. 
#[derive( DefaultNoBound, CloneNoBound, @@ -468,12 +471,13 @@ where RuntimeDebugNoBound, TypeInfo, )] -pub struct RefundSignedExtensionAdapter(T) +pub struct RefundTransactionExtensionAdapter(T) where >::BridgedChain: Chain; -impl SignedExtension for RefundSignedExtensionAdapter +impl TransactionExtensionBase + for RefundTransactionExtensionAdapter where >::BridgedChain: Chain, @@ -483,22 +487,35 @@ where + MessagesCallSubType::Instance>, { const IDENTIFIER: &'static str = T::Id::STR; - type AccountId = AccountIdOf; - type Call = CallOf; - type AdditionalSigned = (); - type Pre = Option>>; + type Implicit = (); +} - fn additional_signed(&self) -> Result<(), TransactionValidityError> { - Ok(()) - } +impl TransactionExtension, Context> + for RefundTransactionExtensionAdapter +where + >::BridgedChain: + Chain, + CallOf: Dispatchable + + IsSubType, T::Runtime>> + + GrandpaCallSubType + + MessagesCallSubType::Instance>, + as Dispatchable>::RuntimeOrigin: + AsSystemOriginSigner> + Clone, +{ + type Pre = Option>>; + type Val = Option; fn validate( &self, - who: &Self::AccountId, - call: &Self::Call, - _info: &DispatchInfoOf, + origin: as Dispatchable>::RuntimeOrigin, + call: &CallOf, + _info: &DispatchInfoOf>, _len: usize, - ) -> TransactionValidity { + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> ValidateResult> { + let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; // this is the only relevant line of code for the `pre_dispatch` // // we're not calling `validate` from `pre_dispatch` directly because of performance @@ -511,12 +528,12 @@ where // we only boost priority of presumably correct message delivery transactions let bundled_messages = match T::bundled_messages_for_priority_boost(parsed_call.as_ref()) { Some(bundled_messages) => bundled_messages, - None => return Ok(Default::default()), + None => return Ok((Default::default(), parsed_call, origin)), }; // we only boost priority if relayer has staked required balance if !RelayersPallet::::is_registration_active(who) { - return Ok(Default::default()) + return Ok((Default::default(), parsed_call, origin)) } // compute priority boost @@ -535,20 +552,21 @@ where priority_boost, ); - valid_transaction.build() + let validity = valid_transaction.build()?; + Ok((validity, parsed_call, origin)) } - fn pre_dispatch( + fn prepare( self, - who: &Self::AccountId, - call: &Self::Call, - _info: &DispatchInfoOf, + val: Self::Val, + origin: & as Dispatchable>::RuntimeOrigin, + _call: &CallOf, + _info: &DispatchInfoOf>, _len: usize, + _context: &Context, ) -> Result { - // this is a relevant piece of `validate` that we need here (in `pre_dispatch`) - let parsed_call = T::parse_and_check_for_obsolete_call(call)?; - - Ok(parsed_call.map(|call_info| { + let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; + Ok(val.map(|call_info| { log::trace!( target: "runtime::bridge", "{} via {:?} parsed bridge transaction in pre-dispatch: {:?}", @@ -561,13 +579,14 @@ where } fn post_dispatch( - pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + pre: Self::Pre, + info: &DispatchInfoOf>, + post_info: &PostDispatchInfoOf>, len: usize, result: &DispatchResult, + _context: &Context, ) -> Result<(), TransactionValidityError> { - let call_result = T::analyze_call_result(pre, info, post_info, len, result); + let call_result = T::analyze_call_result(Some(pre), info, post_info, len, result); match call_result { RelayerAccountAction::None => 
(), @@ -595,7 +614,7 @@ where } } -/// Signed extension that refunds a relayer for new messages coming from a parachain. +/// Transaction extension that refunds a relayer for new messages coming from a parachain. /// /// Also refunds relayer for successful finality delivery if it comes in batch (`utility.batchAll`) /// with message delivery transaction. Batch may deliver either both relay chain header and @@ -636,7 +655,7 @@ pub struct RefundBridgedParachainMessages, ); -impl RefundSignedExtension +impl RefundTransactionExtension for RefundBridgedParachainMessages where Self: 'static + Send + Sync, @@ -730,13 +749,13 @@ where } } -/// Signed extension that refunds a relayer for new messages coming from a standalone (GRANDPA) +/// Transaction extension that refunds a relayer for new messages coming from a standalone (GRANDPA) /// chain. /// /// Also refunds relayer for successful finality delivery if it comes in batch (`utility.batchAll`) /// with message delivery transaction. Batch may deliver either both relay chain header and -/// parachain head, or just parachain head. Corresponding headers must be used in messages -/// proof verification. +/// parachain head, or just parachain head. Corresponding headers must be used in messages proof +/// verification. /// /// Extension does not refund transaction tip due to security reasons. #[derive( @@ -771,7 +790,7 @@ pub struct RefundBridgedGrandpaMessages, ); -impl RefundSignedExtension +impl RefundTransactionExtension for RefundBridgedGrandpaMessages where Self: 'static + Send + Sync, @@ -869,8 +888,8 @@ mod tests { Call as ParachainsCall, Pallet as ParachainsPallet, RelayBlockHash, }; use sp_runtime::{ - traits::{ConstU64, Header as HeaderT}, - transaction_validity::{InvalidTransaction, ValidTransaction}, + traits::{ConstU64, DispatchTransaction, Header as HeaderT}, + transaction_validity::{InvalidTransaction, TransactionValidity, ValidTransaction}, DispatchError, }; @@ -899,7 +918,7 @@ mod tests { ConstU64<1>, StrTestExtension, >; - type TestGrandpaExtension = RefundSignedExtensionAdapter; + type TestGrandpaExtension = RefundTransactionExtensionAdapter; type TestExtensionProvider = RefundBridgedParachainMessages< TestRuntime, DefaultRefundableParachainId<(), TestParachain>, @@ -908,7 +927,7 @@ mod tests { ConstU64<1>, StrTestExtension, >; - type TestExtension = RefundSignedExtensionAdapter; + type TestExtension = RefundTransactionExtensionAdapter; fn initial_balance_of_relayer_account_at_this_chain() -> ThisChainBalance { let test_stake: ThisChainBalance = TestStake::get(); @@ -1407,14 +1426,28 @@ mod tests { fn run_validate(call: RuntimeCall) -> TransactionValidity { let extension: TestExtension = - RefundSignedExtensionAdapter(RefundBridgedParachainMessages(PhantomData)); - extension.validate(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) + RefundTransactionExtensionAdapter(RefundBridgedParachainMessages(PhantomData)); + extension + .validate_only( + Some(relayer_account_at_this_chain()).into(), + &call, + &DispatchInfo::default(), + 0, + ) + .map(|res| res.0) } fn run_grandpa_validate(call: RuntimeCall) -> TransactionValidity { let extension: TestGrandpaExtension = - RefundSignedExtensionAdapter(RefundBridgedGrandpaMessages(PhantomData)); - extension.validate(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) + RefundTransactionExtensionAdapter(RefundBridgedGrandpaMessages(PhantomData)); + extension + .validate_only( + Some(relayer_account_at_this_chain()).into(), + &call, + 
&DispatchInfo::default(), + 0, + ) + .map(|res| res.0) } fn run_validate_ignore_priority(call: RuntimeCall) -> TransactionValidity { @@ -1428,16 +1461,30 @@ mod tests { call: RuntimeCall, ) -> Result>, TransactionValidityError> { let extension: TestExtension = - RefundSignedExtensionAdapter(RefundBridgedParachainMessages(PhantomData)); - extension.pre_dispatch(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) + RefundTransactionExtensionAdapter(RefundBridgedParachainMessages(PhantomData)); + extension + .validate_and_prepare( + Some(relayer_account_at_this_chain()).into(), + &call, + &DispatchInfo::default(), + 0, + ) + .map(|(pre, _)| pre) } fn run_grandpa_pre_dispatch( call: RuntimeCall, ) -> Result>, TransactionValidityError> { let extension: TestGrandpaExtension = - RefundSignedExtensionAdapter(RefundBridgedGrandpaMessages(PhantomData)); - extension.pre_dispatch(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) + RefundTransactionExtensionAdapter(RefundBridgedGrandpaMessages(PhantomData)); + extension + .validate_and_prepare( + Some(relayer_account_at_this_chain()).into(), + &call, + &DispatchInfo::default(), + 0, + ) + .map(|(pre, _)| pre) } fn dispatch_info() -> DispatchInfo { @@ -1460,11 +1507,12 @@ mod tests { dispatch_result: DispatchResult, ) { let post_dispatch_result = TestExtension::post_dispatch( - Some(pre_dispatch_data), + pre_dispatch_data, &dispatch_info(), &post_dispatch_info(), 1024, &dispatch_result, + &(), ); assert_eq!(post_dispatch_result, Ok(())); } diff --git a/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs b/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs index c49aa4b856397d28746d017fd8333ae3ad10655e..f186f6427ae7d5cbac37c0dea528665ea474803e 100644 --- a/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs +++ b/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs @@ -26,7 +26,7 @@ pub use bp_polkadot_core::{ }; use bp_messages::*; -use bp_polkadot_core::SuffixedCommonSignedExtension; +use bp_polkadot_core::SuffixedCommonTransactionExtension; use bp_runtime::extensions::{ BridgeRejectObsoleteHeadersAndMessages, RefundBridgedParachainMessagesSchema, }; @@ -164,7 +164,7 @@ pub const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 1024; pub const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 4096; /// Signed extension that is used by all bridge hubs. -pub type SignedExtension = SuffixedCommonSignedExtension<( +pub type TransactionExtension = SuffixedCommonTransactionExtension<( BridgeRejectObsoleteHeadersAndMessages, RefundBridgedParachainMessagesSchema, )>; diff --git a/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs b/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs index c4e697fbe9526b85c7f10cf739c6893d50190fe9..992ef1bd7a10f5fd95d1a5b4ec1a1267543ce615 100644 --- a/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs +++ b/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs @@ -107,5 +107,5 @@ frame_support::parameter_types! { /// Transaction fee that is paid at the Rococo BridgeHub for delivering single outbound message confirmation. 
/// (initially was calculated by test `BridgeHubRococo::can_calculate_fee_for_complex_message_confirmation_transaction` + `33%`) - pub const BridgeHubRococoBaseConfirmationFeeInRocs: u128 = 5_380_829_647; + pub const BridgeHubRococoBaseConfirmationFeeInRocs: u128 = 5_380_904_835; } diff --git a/bridges/primitives/chain-kusama/src/lib.rs b/bridges/primitives/chain-kusama/src/lib.rs index e3b4d0520f61c858b54d78dfa4a45f57bac411fb..253a1010e83df928cceae91a18c309959c33d94e 100644 --- a/bridges/primitives/chain-kusama/src/lib.rs +++ b/bridges/primitives/chain-kusama/src/lib.rs @@ -59,8 +59,8 @@ impl ChainWithGrandpa for Kusama { const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } -// The SignedExtension used by Kusama. -pub use bp_polkadot_core::CommonSignedExtension as SignedExtension; +// The TransactionExtension used by Kusama. +pub use bp_polkadot_core::CommonTransactionExtension as TransactionExtension; /// Name of the parachains pallet in the Kusama runtime. pub const PARAS_PALLET_NAME: &str = "Paras"; diff --git a/bridges/primitives/chain-polkadot-bulletin/src/lib.rs b/bridges/primitives/chain-polkadot-bulletin/src/lib.rs index f2eebf9312470a42e1d3a1c7d67ab8b7a38af189..73dd122bd153869b937ed65f8e7ea7f4dde79c7c 100644 --- a/bridges/primitives/chain-polkadot-bulletin/src/lib.rs +++ b/bridges/primitives/chain-polkadot-bulletin/src/lib.rs @@ -25,7 +25,7 @@ use bp_runtime::{ decl_bridge_finality_runtime_apis, decl_bridge_messages_runtime_apis, extensions::{ CheckEra, CheckGenesis, CheckNonZeroSender, CheckNonce, CheckSpecVersion, CheckTxVersion, - CheckWeight, GenericSignedExtension, GenericSignedExtensionSchema, + CheckWeight, GenericTransactionExtension, GenericTransactionExtensionSchema, }, Chain, ChainId, TransactionEra, }; @@ -37,7 +37,12 @@ use frame_support::{ }; use frame_system::limits; use scale_info::TypeInfo; -use sp_runtime::{traits::DispatchInfoOf, transaction_validity::TransactionValidityError, Perbill}; +use sp_runtime::{ + impl_tx_ext_default, + traits::{Dispatchable, TransactionExtensionBase}, + transaction_validity::TransactionValidityError, + Perbill, +}; // This chain reuses most of Polkadot primitives. pub use bp_polkadot_core::{ @@ -71,10 +76,10 @@ pub const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 1024; pub const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 4096; /// This signed extension is used to ensure that the chain transactions are signed by proper -pub type ValidateSigned = GenericSignedExtensionSchema<(), ()>; +pub type ValidateSigned = GenericTransactionExtensionSchema<(), ()>; /// Signed extension schema, used by Polkadot Bulletin. -pub type SignedExtensionSchema = GenericSignedExtension<( +pub type TransactionExtensionSchema = GenericTransactionExtension<( ( CheckNonZeroSender, CheckSpecVersion, @@ -87,34 +92,30 @@ pub type SignedExtensionSchema = GenericSignedExtension<( ValidateSigned, )>; -/// Signed extension, used by Polkadot Bulletin. +/// Transaction extension, used by Polkadot Bulletin. 
#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub struct SignedExtension(SignedExtensionSchema); +pub struct TransactionExtension(TransactionExtensionSchema); -impl sp_runtime::traits::SignedExtension for SignedExtension { +impl TransactionExtensionBase for TransactionExtension { const IDENTIFIER: &'static str = "Not needed."; - type AccountId = (); - type Call = (); - type AdditionalSigned = - ::AdditionalSigned; - type Pre = (); + type Implicit = ::Implicit; - fn additional_signed(&self) -> Result { - self.0.additional_signed() + fn implicit(&self) -> Result { + ::implicit(&self.0) } +} - fn pre_dispatch( - self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> Result { - Ok(()) - } +impl sp_runtime::traits::TransactionExtension for TransactionExtension +where + C: Dispatchable, +{ + type Pre = (); + type Val = (); + + impl_tx_ext_default!(C; Context; validate prepare); } -impl SignedExtension { +impl TransactionExtension { /// Create signed extension from its components. pub fn from_params( spec_version: u32, @@ -123,7 +124,7 @@ impl SignedExtension { genesis_hash: Hash, nonce: Nonce, ) -> Self { - Self(GenericSignedExtension::new( + Self(GenericTransactionExtension::new( ( ( (), // non-zero sender diff --git a/bridges/primitives/chain-polkadot/src/lib.rs b/bridges/primitives/chain-polkadot/src/lib.rs index fc5e10308a8e33463a74c041f157daaef09cc9c8..e5e2e7c3a042abddd8fb3dbd6f1decf62529cfe3 100644 --- a/bridges/primitives/chain-polkadot/src/lib.rs +++ b/bridges/primitives/chain-polkadot/src/lib.rs @@ -61,8 +61,8 @@ impl ChainWithGrandpa for Polkadot { const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } -/// The SignedExtension used by Polkadot. -pub type SignedExtension = SuffixedCommonSignedExtension; +/// The TransactionExtension used by Polkadot. +pub type TransactionExtension = SuffixedCommonTransactionExtension; /// Name of the parachains pallet in the Polkadot runtime. pub const PARAS_PALLET_NAME: &str = "Paras"; diff --git a/bridges/primitives/chain-rococo/src/lib.rs b/bridges/primitives/chain-rococo/src/lib.rs index f1b256f0f090f048cc8db3a16c112ed8b938f6ce..267c6b2b1f0292b64ca8aaf969845129dae64dd8 100644 --- a/bridges/primitives/chain-rococo/src/lib.rs +++ b/bridges/primitives/chain-rococo/src/lib.rs @@ -59,8 +59,8 @@ impl ChainWithGrandpa for Rococo { const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } -// The SignedExtension used by Rococo. -pub use bp_polkadot_core::CommonSignedExtension as SignedExtension; +// The TransactionExtension used by Rococo. +pub use bp_polkadot_core::CommonTransactionExtension as TransactionExtension; /// Name of the parachains pallet in the Rococo runtime. pub const PARAS_PALLET_NAME: &str = "Paras"; diff --git a/bridges/primitives/chain-westend/src/lib.rs b/bridges/primitives/chain-westend/src/lib.rs index f03fd2160a700eb3817a6feb629e9d366cc366aa..afa02e8ee541e5e2912b4b9a216feb633a07b617 100644 --- a/bridges/primitives/chain-westend/src/lib.rs +++ b/bridges/primitives/chain-westend/src/lib.rs @@ -59,8 +59,8 @@ impl ChainWithGrandpa for Westend { const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } -// The SignedExtension used by Westend. -pub use bp_polkadot_core::CommonSignedExtension as SignedExtension; +// The TransactionExtension used by Westend. +pub use bp_polkadot_core::CommonTransactionExtension as TransactionExtension; /// Name of the parachains pallet in the Rococo runtime. 
pub const PARAS_PALLET_NAME: &str = "Paras"; diff --git a/bridges/primitives/polkadot-core/src/lib.rs b/bridges/primitives/polkadot-core/src/lib.rs index df2836495bbe131e9cf810c43eb4af5eefaf43b7..d59b99db4b586dde7b2d645ff44c34b94f865f24 100644 --- a/bridges/primitives/polkadot-core/src/lib.rs +++ b/bridges/primitives/polkadot-core/src/lib.rs @@ -24,8 +24,8 @@ use bp_runtime::{ self, extensions::{ ChargeTransactionPayment, CheckEra, CheckGenesis, CheckNonZeroSender, CheckNonce, - CheckSpecVersion, CheckTxVersion, CheckWeight, GenericSignedExtension, - SignedExtensionSchema, + CheckSpecVersion, CheckTxVersion, CheckWeight, GenericTransactionExtension, + TransactionExtensionSchema, }, EncodedOrDecodedCall, StorageMapKeyProvider, TransactionEra, }; @@ -229,8 +229,12 @@ pub type SignedBlock = generic::SignedBlock; pub type Balance = u128; /// Unchecked Extrinsic type. -pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic, Signature, SignedExt>; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic< + AccountAddress, + EncodedOrDecodedCall, + Signature, + TransactionExt, +>; /// Account address, used by the Polkadot-like chain. pub type Address = MultiAddress; @@ -275,7 +279,7 @@ impl AccountInfoStorageMapKeyProvider { } /// Extra signed extension data that is used by most chains. -pub type CommonSignedExtra = ( +pub type CommonTransactionExtra = ( CheckNonZeroSender, CheckSpecVersion, CheckTxVersion, @@ -286,12 +290,12 @@ pub type CommonSignedExtra = ( ChargeTransactionPayment, ); -/// Extra signed extension data that starts with `CommonSignedExtra`. -pub type SuffixedCommonSignedExtension = - GenericSignedExtension<(CommonSignedExtra, Suffix)>; +/// Extra transaction extension data that starts with `CommonTransactionExtra`. +pub type SuffixedCommonTransactionExtension = + GenericTransactionExtension<(CommonTransactionExtra, Suffix)>; -/// Helper trait to define some extra methods on `SuffixedCommonSignedExtension`. -pub trait SuffixedCommonSignedExtensionExt { +/// Helper trait to define some extra methods on `SuffixedCommonTransactionExtension`. +pub trait SuffixedCommonTransactionExtensionExt { /// Create signed extension from its components. fn from_params( spec_version: u32, @@ -300,7 +304,7 @@ pub trait SuffixedCommonSignedExtensionExt { genesis_hash: Hash, nonce: Nonce, tip: Balance, - extra: (Suffix::Payload, Suffix::AdditionalSigned), + extra: (Suffix::Payload, Suffix::Implicit), ) -> Self; /// Return transaction nonce. @@ -310,9 +314,10 @@ pub trait SuffixedCommonSignedExtensionExt { fn tip(&self) -> Balance; } -impl SuffixedCommonSignedExtensionExt for SuffixedCommonSignedExtension +impl SuffixedCommonTransactionExtensionExt + for SuffixedCommonTransactionExtension where - Suffix: SignedExtensionSchema, + Suffix: TransactionExtensionSchema, { fn from_params( spec_version: u32, @@ -321,9 +326,9 @@ where genesis_hash: Hash, nonce: Nonce, tip: Balance, - extra: (Suffix::Payload, Suffix::AdditionalSigned), + extra: (Suffix::Payload, Suffix::Implicit), ) -> Self { - GenericSignedExtension::new( + GenericTransactionExtension::new( ( ( (), // non-zero sender @@ -365,7 +370,7 @@ where } /// Signed extension that is used by most chains. 
-pub type CommonSignedExtension = SuffixedCommonSignedExtension<()>; +pub type CommonTransactionExtension = SuffixedCommonTransactionExtension<()>; #[cfg(test)] mod tests { diff --git a/bridges/primitives/runtime/src/extensions.rs b/bridges/primitives/runtime/src/extensions.rs index d896bc92efffc4e8fcb427ffa7057dece6f17241..a31e7b5bb47a64ec2333bbaba3e9c520aa53ef5a 100644 --- a/bridges/primitives/runtime/src/extensions.rs +++ b/bridges/primitives/runtime/src/extensions.rs @@ -20,135 +20,138 @@ use codec::{Compact, Decode, Encode}; use impl_trait_for_tuples::impl_for_tuples; use scale_info::{StaticTypeInfo, TypeInfo}; use sp_runtime::{ - traits::{DispatchInfoOf, SignedExtension}, + impl_tx_ext_default, + traits::{Dispatchable, TransactionExtension, TransactionExtensionBase}, transaction_validity::TransactionValidityError, }; use sp_std::{fmt::Debug, marker::PhantomData}; -/// Trait that describes some properties of a `SignedExtension` that are needed in order to send a -/// transaction to the chain. -pub trait SignedExtensionSchema: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo { +/// Trait that describes some properties of a `TransactionExtension` that are needed in order to +/// send a transaction to the chain. +pub trait TransactionExtensionSchema: + Encode + Decode + Debug + Eq + Clone + StaticTypeInfo +{ /// A type of the data encoded as part of the transaction. type Payload: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo; /// Parameters which are part of the payload used to produce transaction signature, /// but don't end up in the transaction itself (i.e. inherent part of the runtime). - type AdditionalSigned: Encode + Debug + Eq + Clone + StaticTypeInfo; + type Implicit: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo; } -impl SignedExtensionSchema for () { +impl TransactionExtensionSchema for () { type Payload = (); - type AdditionalSigned = (); + type Implicit = (); } -/// An implementation of `SignedExtensionSchema` using generic params. +/// An implementation of `TransactionExtensionSchema` using generic params. #[derive(Encode, Decode, Clone, Debug, PartialEq, Eq, TypeInfo)] -pub struct GenericSignedExtensionSchema(PhantomData<(P, S)>); +pub struct GenericTransactionExtensionSchema(PhantomData<(P, S)>); -impl SignedExtensionSchema for GenericSignedExtensionSchema +impl TransactionExtensionSchema for GenericTransactionExtensionSchema where P: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo, - S: Encode + Debug + Eq + Clone + StaticTypeInfo, + S: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo, { type Payload = P; - type AdditionalSigned = S; + type Implicit = S; } -/// The `SignedExtensionSchema` for `frame_system::CheckNonZeroSender`. -pub type CheckNonZeroSender = GenericSignedExtensionSchema<(), ()>; +/// The `TransactionExtensionSchema` for `frame_system::CheckNonZeroSender`. +pub type CheckNonZeroSender = GenericTransactionExtensionSchema<(), ()>; -/// The `SignedExtensionSchema` for `frame_system::CheckSpecVersion`. -pub type CheckSpecVersion = GenericSignedExtensionSchema<(), u32>; +/// The `TransactionExtensionSchema` for `frame_system::CheckSpecVersion`. +pub type CheckSpecVersion = GenericTransactionExtensionSchema<(), u32>; -/// The `SignedExtensionSchema` for `frame_system::CheckTxVersion`. -pub type CheckTxVersion = GenericSignedExtensionSchema<(), u32>; +/// The `TransactionExtensionSchema` for `frame_system::CheckTxVersion`. 
+pub type CheckTxVersion = GenericTransactionExtensionSchema<(), u32>; -/// The `SignedExtensionSchema` for `frame_system::CheckGenesis`. -pub type CheckGenesis = GenericSignedExtensionSchema<(), Hash>; +/// The `TransactionExtensionSchema` for `frame_system::CheckGenesis`. +pub type CheckGenesis = GenericTransactionExtensionSchema<(), Hash>; -/// The `SignedExtensionSchema` for `frame_system::CheckEra`. -pub type CheckEra = GenericSignedExtensionSchema; +/// The `TransactionExtensionSchema` for `frame_system::CheckEra`. +pub type CheckEra = GenericTransactionExtensionSchema; -/// The `SignedExtensionSchema` for `frame_system::CheckNonce`. -pub type CheckNonce = GenericSignedExtensionSchema, ()>; +/// The `TransactionExtensionSchema` for `frame_system::CheckNonce`. +pub type CheckNonce = GenericTransactionExtensionSchema, ()>; -/// The `SignedExtensionSchema` for `frame_system::CheckWeight`. -pub type CheckWeight = GenericSignedExtensionSchema<(), ()>; +/// The `TransactionExtensionSchema` for `frame_system::CheckWeight`. +pub type CheckWeight = GenericTransactionExtensionSchema<(), ()>; -/// The `SignedExtensionSchema` for `pallet_transaction_payment::ChargeTransactionPayment`. -pub type ChargeTransactionPayment = GenericSignedExtensionSchema, ()>; +/// The `TransactionExtensionSchema` for `pallet_transaction_payment::ChargeTransactionPayment`. +pub type ChargeTransactionPayment = + GenericTransactionExtensionSchema, ()>; -/// The `SignedExtensionSchema` for `polkadot-runtime-common::PrevalidateAttests`. -pub type PrevalidateAttests = GenericSignedExtensionSchema<(), ()>; +/// The `TransactionExtensionSchema` for `polkadot-runtime-common::PrevalidateAttests`. +pub type PrevalidateAttests = GenericTransactionExtensionSchema<(), ()>; -/// The `SignedExtensionSchema` for `BridgeRejectObsoleteHeadersAndMessages`. -pub type BridgeRejectObsoleteHeadersAndMessages = GenericSignedExtensionSchema<(), ()>; +/// The `TransactionExtensionSchema` for `BridgeRejectObsoleteHeadersAndMessages`. +pub type BridgeRejectObsoleteHeadersAndMessages = GenericTransactionExtensionSchema<(), ()>; -/// The `SignedExtensionSchema` for `RefundBridgedParachainMessages`. +/// The `TransactionExtensionSchema` for `RefundBridgedParachainMessages`. /// This schema is dedicated for `RefundBridgedParachainMessages` signed extension as /// wildcard/placeholder, which relies on the scale encoding for `()` or `((), ())`, or `((), (), /// ())` is the same. So runtime can contains any kind of tuple: /// `(BridgeRefundBridgeHubRococoMessages)` /// `(BridgeRefundBridgeHubRococoMessages, BridgeRefundBridgeHubWestendMessages)` /// `(BridgeRefundParachainMessages1, ..., BridgeRefundParachainMessagesN)` -pub type RefundBridgedParachainMessagesSchema = GenericSignedExtensionSchema<(), ()>; +pub type RefundBridgedParachainMessagesSchema = GenericTransactionExtensionSchema<(), ()>; #[impl_for_tuples(1, 12)] -impl SignedExtensionSchema for Tuple { +impl TransactionExtensionSchema for Tuple { for_tuples!( type Payload = ( #( Tuple::Payload ),* ); ); - for_tuples!( type AdditionalSigned = ( #( Tuple::AdditionalSigned ),* ); ); + for_tuples!( type Implicit = ( #( Tuple::Implicit ),* ); ); } /// A simplified version of signed extensions meant for producing signed transactions /// and signed payloads in the client code. #[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub struct GenericSignedExtension { +pub struct GenericTransactionExtension { /// A payload that is included in the transaction. 
pub payload: S::Payload, #[codec(skip)] // It may be set to `None` if extensions are decoded. We are never reconstructing transactions - // (and it makes no sense to do that) => decoded version of `SignedExtensions` is only used to - // read fields of the `payload`. And when resigning transaction, we're reconstructing - // `SignedExtensions` from scratch. - additional_signed: Option, + // (and it makes no sense to do that) => decoded version of `TransactionExtensions` is only + // used to read fields of the `payload`. And when resigning transaction, we're reconstructing + // `TransactionExtensions` from scratch. + implicit: Option, } -impl GenericSignedExtension { - /// Create new `GenericSignedExtension` object. - pub fn new(payload: S::Payload, additional_signed: Option) -> Self { - Self { payload, additional_signed } +impl GenericTransactionExtension { + /// Create new `GenericTransactionExtension` object. + pub fn new(payload: S::Payload, implicit: Option) -> Self { + Self { payload, implicit } } } -impl SignedExtension for GenericSignedExtension +impl TransactionExtensionBase for GenericTransactionExtension where - S: SignedExtensionSchema, + S: TransactionExtensionSchema, S::Payload: Send + Sync, - S::AdditionalSigned: Send + Sync, + S::Implicit: Send + Sync, { const IDENTIFIER: &'static str = "Not needed."; - type AccountId = (); - type Call = (); - type AdditionalSigned = S::AdditionalSigned; - type Pre = (); + type Implicit = S::Implicit; - fn additional_signed(&self) -> Result { + fn implicit(&self) -> Result { // we shall not ever see this error in relay, because we are never signing decoded // transactions. Instead we're constructing and signing new transactions. So the error code // is kinda random here - self.additional_signed.clone().ok_or( - frame_support::unsigned::TransactionValidityError::Unknown( + self.implicit + .clone() + .ok_or(frame_support::unsigned::TransactionValidityError::Unknown( frame_support::unsigned::UnknownTransaction::Custom(0xFF), - ), - ) + )) } +} +impl TransactionExtension for GenericTransactionExtension +where + C: Dispatchable, + S: TransactionExtensionSchema, + S::Payload: Send + Sync, + S::Implicit: Send + Sync, +{ + type Pre = (); + type Val = (); - fn pre_dispatch( - self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> Result { - Ok(()) - } + impl_tx_ext_default!(C; Context; validate prepare); } diff --git a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml index 99b4290531145dba85521574a6d5004e7696cb08..c8999633c97abb00174e38e16ed5618e7baf0b59 100644 --- a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml +++ b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-pallet-ethereum-client" description = "Snowbridge Ethereum Client Pallet" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml index c26ef90a22dbeca766cb7064c1947b52c2e99794..b850496cd4e14cd906565d488450b339a29f463f 100644 --- a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml +++ b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-pallet-inbound-queue" description = "Snowbridge Inbound Queue Pallet" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true 
repository.workspace = true diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml index 61f1421e056773c4f078390f9c48f7b8fa0420d3..64605a42f0d383d838429eb9b82b5f6cf238ab09 100644 --- a/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml +++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-pallet-inbound-queue-fixtures" description = "Snowbridge Inbound Queue Test Fixtures" -version = "0.9.0" +version = "0.10.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs index 110f611c6766020039bd1f73def900914da8cae2..749fb0367f332d743b01ad9d56238106ced36e72 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs @@ -3,7 +3,7 @@ use super::*; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU128, ConstU32, Everything}, weights::IdentityFee, }; @@ -47,10 +47,9 @@ parameter_types! { type Balance = u128; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Everything; - type BlockWeights = (); - type BlockLength = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeTask = RuntimeTask; @@ -60,16 +59,8 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type RuntimeEvent = RuntimeEvent; type BlockHashCount = BlockHashCount; - type DbWeight = (); - type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; type Nonce = u64; type Block = Block; } diff --git a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml index 40e57db4bbd9e5d6e32cee84e586a8858b2222cf..f16a28cb1e457d9ebfb7804fa013e5b57858f79e 100644 --- a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-pallet-outbound-queue" description = "Snowbridge Outbound Queue Pallet" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml index c185d5af7062045f40946fcbd3c45cb62b932216..0606e9de33056c9dffae50befcc1da5e865dca44 100644 --- a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-outbound-queue-merkle-tree" description = "Snowbridge Outbound Queue Merkle Tree" -version = "0.1.1" +version = "0.3.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml index 347b3bae493b7491790854be7a28f82386d2ee4b..cb68fd0a250a92e7f6a6693f3aebf1c8553308aa 100644 --- a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml +++ 
b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-outbound-queue-runtime-api" description = "Snowbridge Outbound Queue Runtime API" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/pallets/outbound-queue/src/mock.rs b/bridges/snowbridge/pallets/outbound-queue/src/mock.rs index dd8fee4e2ed08ec0f3090b765fa882b063a98300..6e78fb4467210e3cb5e1eb581b377cbbfeac74ad 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/mock.rs @@ -3,7 +3,7 @@ use super::*; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{Everything, Hooks}, weights::IdentityFee, }; @@ -37,10 +37,9 @@ parameter_types! { pub const BlockHashCount: u64 = 250; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Everything; - type BlockWeights = (); - type BlockLength = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeTask = RuntimeTask; @@ -50,16 +49,7 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type RuntimeEvent = RuntimeEvent; type BlockHashCount = BlockHashCount; - type DbWeight = (); - type Version = (); type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; type Nonce = u64; type Block = Block; } diff --git a/bridges/snowbridge/pallets/system/Cargo.toml b/bridges/snowbridge/pallets/system/Cargo.toml index f6c642e7376f7f068af1fe3c2cf9b11a2c50f2cc..5ad04290de044a2c8ed13aa092f5ea033aaafb97 100644 --- a/bridges/snowbridge/pallets/system/Cargo.toml +++ b/bridges/snowbridge/pallets/system/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-pallet-system" description = "Snowbridge System Pallet" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml index 355d2d29147f3cd84ae013363db874c9b9739b8e..eb02ae1db529730f51743e79a322e54db44fee51 100644 --- a/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml +++ b/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-system-runtime-api" description = "Snowbridge System Runtime API" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/pallets/system/src/mock.rs b/bridges/snowbridge/pallets/system/src/mock.rs index edc3f141b0735d7439b120c51da836fb8a77bd04..de2970dd550ba75fe42de08dc4d297cd5cccdf1f 100644 --- a/bridges/snowbridge/pallets/system/src/mock.rs +++ b/bridges/snowbridge/pallets/system/src/mock.rs @@ -2,8 +2,8 @@ // SPDX-FileCopyrightText: 2023 Snowfork use crate as snowbridge_system; use frame_support::{ - parameter_types, - traits::{tokens::fungible::Mutate, ConstU128, ConstU16, ConstU64, ConstU8}, + derive_impl, parameter_types, + traits::{tokens::fungible::Mutate, ConstU128, ConstU64, ConstU8}, weights::IdentityFee, PalletId, }; @@ -95,11 +95,9 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig 
as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeTask = RuntimeTask; @@ -109,15 +107,8 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; - type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = ConstU16<42>; - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; type Nonce = u64; type Block = Block; } diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs new file mode 100644 index 0000000000000000000000000000000000000000..77b5c1aa631db89a986837f258ee7dea45a580d0 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use crate as ethereum_beacon_client; +use frame_support::parameter_types; +use pallet_timestamp; +use primitives::{Fork, ForkVersions}; +use sp_core::H256; +use sp_runtime::traits::{BlakeTwo256, IdentityLookup}; + +#[cfg(not(feature = "beacon-spec-mainnet"))] +pub mod minimal { + use super::*; + + use crate::config; + use frame_support::derive_impl; + use hex_literal::hex; + use primitives::CompactExecutionHeader; + use snowbridge_core::inbound::{Log, Proof}; + use sp_runtime::BuildStorage; + use std::{fs::File, path::PathBuf}; + + type Block = frame_system::mocking::MockBlock; + + frame_support::construct_runtime!( + pub enum Test { + System: frame_system::{Pallet, Call, Storage, Event}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + EthereumBeaconClient: ethereum_beacon_client::{Pallet, Call, Storage, Event}, + } + ); + + parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 42; + } + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type RuntimeTask = RuntimeTask; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type PalletInfo = PalletInfo; + type SS58Prefix = SS58Prefix; + type Nonce = u64; + type Block = Block; + } + + impl pallet_timestamp::Config for Test { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = (); + type WeightInfo = (); + } + + parameter_types! 
{ + pub const ExecutionHeadersPruneThreshold: u32 = 10; + pub const ChainForkVersions: ForkVersions = ForkVersions{ + genesis: Fork { + version: [0, 0, 0, 1], // 0x00000001 + epoch: 0, + }, + altair: Fork { + version: [1, 0, 0, 1], // 0x01000001 + epoch: 0, + }, + bellatrix: Fork { + version: [2, 0, 0, 1], // 0x02000001 + epoch: 0, + }, + capella: Fork { + version: [3, 0, 0, 1], // 0x03000001 + epoch: 0, + }, + }; + } + + impl ethereum_beacon_client::Config for Test { + type RuntimeEvent = RuntimeEvent; + type ForkVersions = ChainForkVersions; + type MaxExecutionHeadersToKeep = ExecutionHeadersPruneThreshold; + type WeightInfo = (); + } + + // Build genesis storage according to the mock runtime. + pub fn new_tester() -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + let _ = ext.execute_with(|| Timestamp::set(RuntimeOrigin::signed(1), 30_000)); + ext + } + + fn load_fixture(basename: &str) -> Result + where + T: for<'de> serde::Deserialize<'de>, + { + let filepath: PathBuf = + [env!("CARGO_MANIFEST_DIR"), "tests", "fixtures", basename].iter().collect(); + serde_json::from_reader(File::open(filepath).unwrap()) + } + + pub fn load_execution_header_update_fixture() -> primitives::ExecutionHeaderUpdate { + load_fixture("execution-header-update.minimal.json").unwrap() + } + + pub fn load_checkpoint_update_fixture( + ) -> primitives::CheckpointUpdate<{ config::SYNC_COMMITTEE_SIZE }> { + load_fixture("initial-checkpoint.minimal.json").unwrap() + } + + pub fn load_sync_committee_update_fixture( + ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { + load_fixture("sync-committee-update.minimal.json").unwrap() + } + + pub fn load_finalized_header_update_fixture( + ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { + load_fixture("finalized-header-update.minimal.json").unwrap() + } + + pub fn load_next_sync_committee_update_fixture( + ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { + load_fixture("next-sync-committee-update.minimal.json").unwrap() + } + + pub fn load_next_finalized_header_update_fixture( + ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { + load_fixture("next-finalized-header-update.minimal.json").unwrap() + } + + pub fn get_message_verification_payload() -> (Log, Proof) { + ( + Log { + address: hex!("ee9170abfbf9421ad6dd07f6bdec9d89f2b581e0").into(), + topics: vec![ + hex!("1b11dcf133cc240f682dab2d3a8e4cd35c5da8c9cf99adac4336f8512584c5ad").into(), + hex!("00000000000000000000000000000000000000000000000000000000000003e8").into(), + hex!("0000000000000000000000000000000000000000000000000000000000000001").into(), + ], + data: hex!("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004b000f000000000000000100d184c103f7acc340847eee82a0b909e3358bc28d440edffa1352b13227e8ee646f3ea37456dec701345772617070656420457468657210574554481235003511000000000000000000000000000000000000000000").into(), + }, + Proof { + block_hash: hex!("05aaa60b0f27cce9e71909508527264b77ee14da7b5bf915fcc4e32715333213").into(), + tx_index: 0, + data: (vec![ + hex!("cf0d1c1ba57d1e0edfb59786c7e30c2b7e12bd54612b00cd21c4eaeecedf44fb").to_vec(), + hex!("d21fc4f68ab05bc4dcb23c67008e92c4d466437cdd6ed7aad0c008944c185510").to_vec(), + 
hex!("b9890f91ca0d77aa2a4adfaf9b9e40c94cac9e638b6d9797923865872944b646").to_vec(), + ], vec![ + hex!("f90131a0b601337b3aa10a671caa724eba641e759399979856141d3aea6b6b4ac59b889ba00c7d5dd48be9060221a02fb8fa213860b4c50d47046c8fa65ffaba5737d569e0a094601b62a1086cd9c9cb71a7ebff9e718f3217fd6e837efe4246733c0a196f63a06a4b0dd0aefc37b3c77828c8f07d1b7a2455ceb5dbfd3c77d7d6aeeddc2f7e8ca0d6e8e23142cdd8ec219e1f5d8b56aa18e456702b195deeaa210327284d42ade4a08a313d4c87023005d1ab631bbfe3f5de1e405d0e66d0bef3e033f1e5711b5521a0bf09a5d9a48b10ade82b8d6a5362a15921c8b5228a3487479b467db97411d82fa0f95cccae2a7c572ef3c566503e30bac2b2feb2d2f26eebf6d870dcf7f8cf59cea0d21fc4f68ab05bc4dcb23c67008e92c4d466437cdd6ed7aad0c008944c1855108080808080808080").to_vec(), + hex!("f851a0b9890f91ca0d77aa2a4adfaf9b9e40c94cac9e638b6d9797923865872944b646a060a634b9280e3a23fb63375e7bbdd9ab07fd379ab6a67e2312bbc112195fa358808080808080808080808080808080").to_vec(), + hex!("f9030820b9030402f90300018301d6e2b9010000000000000800000000000020040008000000000000000000000000400000008000000000000000000000000000000000000000000000000000000000042010000000001000000000000000000000000000000000040000000000000000000000000000000000000000000000008000000000000000002000000000000000000000000200000000000000200000000000100000000040000001000200008000000000000200000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000f901f5f87a942ffa5ecdbe006d30397c7636d3e015eee251369ff842a0c965575a00553e094ca7c5d14f02e107c258dda06867cbf9e0e69f80e71bbcc1a000000000000000000000000000000000000000000000000000000000000003e8a000000000000000000000000000000000000000000000000000000000000003e8f9011c94ee9170abfbf9421ad6dd07f6bdec9d89f2b581e0f863a01b11dcf133cc240f682dab2d3a8e4cd35c5da8c9cf99adac4336f8512584c5ada000000000000000000000000000000000000000000000000000000000000003e8a00000000000000000000000000000000000000000000000000000000000000001b8a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004b000f000000000000000100d184c103f7acc340847eee82a0b909e3358bc28d440edffa1352b13227e8ee646f3ea37456dec701345772617070656420457468657210574554481235003511000000000000000000000000000000000000000000f858948cf6147918a5cbb672703f879f385036f8793a24e1a01449abf21e49fd025f33495e77f7b1461caefdd3d4bb646424a3f445c4576a5ba0000000000000000000000000440edffa1352b13227e8ee646f3ea37456dec701").to_vec(), + ]), + } + ) + } + + pub fn get_message_verification_header() -> CompactExecutionHeader { + CompactExecutionHeader { + parent_hash: hex!("04a7f6ab8282203562c62f38b0ab41d32aaebe2c7ea687702b463148a6429e04") + .into(), + block_number: 55, + state_root: hex!("894d968712976d613519f973a317cb0781c7b039c89f27ea2b7ca193f7befdb3") + .into(), + receipts_root: hex!("cf0d1c1ba57d1e0edfb59786c7e30c2b7e12bd54612b00cd21c4eaeecedf44fb") + .into(), + } + } +} + +#[cfg(feature = "beacon-spec-mainnet")] +pub mod mainnet { + use super::*; + use frame_support::derive_impl; + + type Block = frame_system::mocking::MockBlock; + use sp_runtime::BuildStorage; + + frame_support::construct_runtime!( + pub enum Test { + System: frame_system::{Pallet, Call, Storage, Event}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + EthereumBeaconClient: ethereum_beacon_client::{Pallet, Call, Storage, Event}, + } + ); + + parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 42; + } + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type RuntimeTask = RuntimeTask; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type PalletInfo = PalletInfo; + type SS58Prefix = SS58Prefix; + type Nonce = u64; + type Block = Block; + } + + impl pallet_timestamp::Config for Test { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = (); + type WeightInfo = (); + } + + parameter_types! { + pub const ChainForkVersions: ForkVersions = ForkVersions{ + genesis: Fork { + version: [0, 0, 16, 32], // 0x00001020 + epoch: 0, + }, + altair: Fork { + version: [1, 0, 16, 32], // 0x01001020 + epoch: 36660, + }, + bellatrix: Fork { + version: [2, 0, 16, 32], // 0x02001020 + epoch: 112260, + }, + capella: Fork { + version: [3, 0, 16, 32], // 0x03001020 + epoch: 162304, + }, + }; + pub const ExecutionHeadersPruneThreshold: u32 = 10; + } + + impl ethereum_beacon_client::Config for Test { + type RuntimeEvent = RuntimeEvent; + type ForkVersions = ChainForkVersions; + type MaxExecutionHeadersToKeep = ExecutionHeadersPruneThreshold; + type WeightInfo = (); + } + + // Build genesis storage according to the mock runtime. + pub fn new_tester() -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + let _ = ext.execute_with(|| Timestamp::set(RuntimeOrigin::signed(1), 30_000)); + ext + } +} diff --git a/bridges/snowbridge/primitives/beacon/Cargo.toml b/bridges/snowbridge/primitives/beacon/Cargo.toml index e021bb01071471b56d44f4a430c9ae5b7bdca57d..d181fa1d3945a704a3d1e1e28fea67b7dea0ee15 100644 --- a/bridges/snowbridge/primitives/beacon/Cargo.toml +++ b/bridges/snowbridge/primitives/beacon/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-beacon-primitives" description = "Snowbridge Beacon Primitives" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/primitives/core/Cargo.toml b/bridges/snowbridge/primitives/core/Cargo.toml index 090c48c403fecf76b598abcc390ffa054a8281a3..9a299ad0ae92326a6d0bb0391baf81e6e5bad663 100644 --- a/bridges/snowbridge/primitives/core/Cargo.toml +++ b/bridges/snowbridge/primitives/core/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-core" description = "Snowbridge Core" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/primitives/ethereum/Cargo.toml b/bridges/snowbridge/primitives/ethereum/Cargo.toml index 399139cef3816a2f840f4937a7449034aea9638d..9fa725a6c0565a5f42847d89149878f8997d07a0 100644 --- a/bridges/snowbridge/primitives/ethereum/Cargo.toml +++ b/bridges/snowbridge/primitives/ethereum/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-ethereum" description = "Snowbridge Ethereum" -version = "0.1.0" +version = "0.3.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/primitives/router/Cargo.toml b/bridges/snowbridge/primitives/router/Cargo.toml index 
750a2a73e6372c138cb4420d5f06f26d35b90ab4..ded773e0d38917b7834679b3e521dfbe9539e51b 100644 --- a/bridges/snowbridge/primitives/router/Cargo.toml +++ b/bridges/snowbridge/primitives/router/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-router-primitives" description = "Snowbridge Router Primitives" -version = "0.0.0" +version = "0.9.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/runtime/runtime-common/Cargo.toml b/bridges/snowbridge/runtime/runtime-common/Cargo.toml index d4c86f8aa750134df8dfbe22a277a872e77f9175..bf5e9a8832dcf48113d5f74a92a060687da2fe4e 100644 --- a/bridges/snowbridge/runtime/runtime-common/Cargo.toml +++ b/bridges/snowbridge/runtime/runtime-common/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-runtime-common" description = "Snowbridge Runtime Common" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/runtime/test-common/Cargo.toml b/bridges/snowbridge/runtime/test-common/Cargo.toml index ec4466b34265fd344e870f92f64eea014ad76b98..5f169e82f49346742bd97028da583105bf02335d 100644 --- a/bridges/snowbridge/runtime/test-common/Cargo.toml +++ b/bridges/snowbridge/runtime/test-common/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-runtime-test-common" description = "Snowbridge Runtime Tests" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition = "2021" license = "Apache-2.0" @@ -181,6 +181,7 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", diff --git a/bridges/snowbridge/runtime/test-common/src/lib.rs b/bridges/snowbridge/runtime/test-common/src/lib.rs index 29b0e738c182e3a9dd930fe8596fc5430470d5e0..7455adf76170acefd50f06e8a40ef1c79028f49f 100644 --- a/bridges/snowbridge/runtime/test-common/src/lib.rs +++ b/bridges/snowbridge/runtime/test-common/src/lib.rs @@ -40,6 +40,7 @@ where } pub fn send_transfer_token_message( + ethereum_chain_id: u64, assethub_parachain_id: u32, weth_contract_address: H160, destination_address: H160, @@ -89,7 +90,7 @@ where WithdrawAsset(Assets::from(vec![fee.clone()])), BuyExecution { fees: fee, weight_limit: Unlimited }, ExportMessage { - network: Ethereum { chain_id: 11155111 }, + network: Ethereum { chain_id: ethereum_chain_id }, destination: Here, xcm: inner_xcm, }, @@ -107,6 +108,7 @@ where } pub fn send_transfer_token_message_success( + ethereum_chain_id: u64, collator_session_key: CollatorSessionKeys, runtime_para_id: u32, assethub_parachain_id: u32, @@ -149,6 +151,7 @@ pub fn send_transfer_token_message_success( initial_fund::(assethub_parachain_id, 5_000_000_000_000); let outcome = send_transfer_token_message::( + ethereum_chain_id, assethub_parachain_id, weth_contract_address, destination_address, @@ -205,6 +208,7 @@ pub fn ethereum_outbound_queue_processes_messages_before_message_queue_works< XcmConfig, AllPalletsWithoutSystem, >( + ethereum_chain_id: u64, collator_session_key: CollatorSessionKeys, runtime_para_id: u32, assethub_parachain_id: u32, @@ -249,6 +253,7 @@ pub fn ethereum_outbound_queue_processes_messages_before_message_queue_works< initial_fund::(assethub_parachain_id, 5_000_000_000_000); let outcome = send_transfer_token_message::( + ethereum_chain_id, assethub_parachain_id, 
weth_contract_address, destination_address, @@ -290,6 +295,7 @@ pub fn ethereum_outbound_queue_processes_messages_before_message_queue_works< } pub fn send_unpaid_transfer_token_message( + ethereum_chain_id: u64, collator_session_key: CollatorSessionKeys, runtime_para_id: u32, assethub_parachain_id: u32, @@ -353,7 +359,7 @@ pub fn send_unpaid_transfer_token_message( let xcm = Xcm(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, ExportMessage { - network: Ethereum { chain_id: 11155111 }, + network: Ethereum { chain_id: ethereum_chain_id }, destination: Here, xcm: inner_xcm, }, @@ -375,6 +381,7 @@ pub fn send_unpaid_transfer_token_message( #[allow(clippy::too_many_arguments)] pub fn send_transfer_token_message_failure( + ethereum_chain_id: u64, collator_session_key: CollatorSessionKeys, runtime_para_id: u32, assethub_parachain_id: u32, @@ -414,6 +421,7 @@ pub fn send_transfer_token_message_failure( initial_fund::(assethub_parachain_id, initial_amount); let outcome = send_transfer_token_message::( + ethereum_chain_id, assethub_parachain_id, weth_contract_address, destination_address, diff --git a/bridges/snowbridge/scripts/contribute-upstream.sh b/bridges/snowbridge/scripts/contribute-upstream.sh index 8aa2d2a7035e2f213c0fc2090952b4b162739963..32005b770ecf44cb9af18c61f830243ed5287e68 100755 --- a/bridges/snowbridge/scripts/contribute-upstream.sh +++ b/bridges/snowbridge/scripts/contribute-upstream.sh @@ -40,10 +40,12 @@ git checkout "$branch_name" # remove everything we think is not required for our needs rm -rf rust-toolchain.toml +rm -rf codecov.yml rm -rf $SNOWBRIDGE_FOLDER/.cargo rm -rf $SNOWBRIDGE_FOLDER/.github rm -rf $SNOWBRIDGE_FOLDER/SECURITY.md rm -rf $SNOWBRIDGE_FOLDER/.gitignore +rm -rf $SNOWBRIDGE_FOLDER/rustfmt.toml rm -rf $SNOWBRIDGE_FOLDER/templates rm -rf $SNOWBRIDGE_FOLDER/pallets/ethereum-client/fuzz diff --git a/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh index de5be2e0af8861008781ca3eab81f0422dc81e91..479ab833abfc38dd978c7b7d3abdd4c1fe37ad64 100755 --- a/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh +++ b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh @@ -319,6 +319,7 @@ case "$1" in $XCM_VERSION ;; reserve-transfer-assets-from-asset-hub-rococo-local) + amount=$2 ensure_polkadot_js_api # send ROCs to Alice account on AHW limited_reserve_transfer_assets \ @@ -326,11 +327,12 @@ case "$1" in "//Alice" \ "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1000 } ] } } }')" \ "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": 5000000000000 } } ] }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": '$amount' } } ] }')" \ 0 \ "Unlimited" ;; withdraw-reserve-assets-from-asset-hub-rococo-local) + amount=$2 ensure_polkadot_js_api # send back only 100000000000 wrappedWNDs to Alice account on AHW limited_reserve_transfer_assets \ @@ -338,11 +340,12 @@ case "$1" in "//Alice" \ "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { 
"Parachain": 1000 } ] } } }')" \ "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Westend" } } } }, "fun": { "Fungible": 3000000000000 } } ] }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Westend" } } } }, "fun": { "Fungible": '$amount' } } ] }')" \ 0 \ "Unlimited" ;; reserve-transfer-assets-from-asset-hub-westend-local) + amount=$2 ensure_polkadot_js_api # send WNDs to Alice account on AHR limited_reserve_transfer_assets \ @@ -350,11 +353,12 @@ case "$1" in "//Alice" \ "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1000 } ] } } }')" \ "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": 5000000000000 } } ] }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": '$amount' } } ] }')" \ 0 \ "Unlimited" ;; withdraw-reserve-assets-from-asset-hub-westend-local) + amount=$2 ensure_polkadot_js_api # send back only 100000000000 wrappedROCs to Alice account on AHR limited_reserve_transfer_assets \ @@ -362,7 +366,7 @@ case "$1" in "//Alice" \ "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1000 } ] } } }')" \ "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Rococo" } } } }, "fun": { "Fungible": 3000000000000 } } ] }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Rococo" } } } }, "fun": { "Fungible": '$amount' } } ] }')" \ 0 \ "Unlimited" ;; diff --git a/bridges/testing/framework/js-helpers/native-assets-balance-increased.js b/bridges/testing/framework/js-helpers/native-assets-balance-increased.js index a35c753d97326d7b200c33844e9c5b8be22dfebc..749c3e2fec32ac0af4d244c53cb4ac1c6237817a 100644 --- a/bridges/testing/framework/js-helpers/native-assets-balance-increased.js +++ b/bridges/testing/framework/js-helpers/native-assets-balance-increased.js @@ -3,12 +3,13 @@ async function run(nodeName, networkInfo, args) { const api = await zombie.connect(wsUri, userDefinedTypes); const accountAddress = args[0]; + const expectedIncrease = BigInt(args[1]); const initialAccountData = await api.query.system.account(accountAddress); const initialAccountBalance = initialAccountData.data['free']; while (true) { const accountData = await api.query.system.account(accountAddress); const accountBalance = accountData.data['free']; - if (accountBalance > initialAccountBalance) { + if (accountBalance > initialAccountBalance + expectedIncrease) { return 
accountBalance; } @@ -17,4 +18,4 @@ async function run(nodeName, networkInfo, args) { } } -module.exports = { run } +module.exports = {run} diff --git a/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl b/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl index 4725362ae826ec6d8313ec45c0ce21f70f244ca6..a58520ccea65b50dd0db1f67a72f6f8a4c5cdb38 100644 --- a/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl +++ b/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl @@ -2,11 +2,11 @@ Description: User is able to transfer ROC from Rococo Asset Hub to Westend Asset Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml Creds: config -# send ROC to //Alice from Rococo AH to Westend AH -asset-hub-westend-collator1: run {{ENV_PATH}}/helper.sh with "reserve-transfer-assets-from-asset-hub-rococo-local" within 120 seconds +# send 5 ROC to //Alice from Rococo AH to Westend AH +asset-hub-westend-collator1: run {{ENV_PATH}}/helper.sh with "reserve-transfer-assets-from-asset-hub-rococo-local 5000000000000" within 120 seconds -# check that //Alice received the ROC on Westend AH -asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Rococo" within 300 seconds +# check that //Alice received at least 4.8 ROC on Westend AH +asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,4800000000000,Rococo" within 300 seconds # check that the relayer //Charlie is rewarded by Westend AH bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x6268726F,ThisChain,0" within 30 seconds diff --git a/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl b/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl index 77267239c3b12bb1c3caf88f9bb01bfd98f2c121..fedb78cc2103555a1d15c446dd2f08fca94643e1 100644 --- a/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl +++ b/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl @@ -2,11 +2,11 @@ Description: User is able to transfer WND from Westend Asset Hub to Rococo Asset Network: {{ENV_PATH}}/bridge_hub_rococo_local_network.toml Creds: config -# send WND to //Alice from Westend AH to Rococo AH -asset-hub-rococo-collator1: run {{ENV_PATH}}/helper.sh with "reserve-transfer-assets-from-asset-hub-westend-local" within 120 seconds +# send 5 WND to //Alice from Westend AH to Rococo AH +asset-hub-rococo-collator1: run {{ENV_PATH}}/helper.sh with "reserve-transfer-assets-from-asset-hub-westend-local 5000000000000" within 120 seconds -# check that //Alice received the WND on Rococo AH -asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Westend" within 300 seconds +# check that //Alice received at least 4.8 WND on Rococo AH +asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,4800000000000,Westend" within 300 seconds # check that the relayer //Charlie is rewarded by Rococo AH bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x62687764,ThisChain,0" within 30 seconds diff --git 
a/bridges/testing/tests/0001-asset-transfer/wroc-reaches-rococo.zndsl b/bridges/testing/tests/0001-asset-transfer/wroc-reaches-rococo.zndsl index f72b76e6026b6a0a24fa16fc4d08f79ba07f5e60..68b888b6858e86b8fe846b887bc101e221b2f21d 100644 --- a/bridges/testing/tests/0001-asset-transfer/wroc-reaches-rococo.zndsl +++ b/bridges/testing/tests/0001-asset-transfer/wroc-reaches-rococo.zndsl @@ -2,9 +2,9 @@ Description: User is able to transfer ROC from Rococo Asset Hub to Westend Asset Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml Creds: config -# send wROC back to Alice from Westend AH to Rococo AH -asset-hub-rococo-collator1: run {{ENV_PATH}}/helper.sh with "withdraw-reserve-assets-from-asset-hub-westend-local" within 120 seconds +# send 3 wROC back to Alice from Westend AH to Rococo AH +asset-hub-rococo-collator1: run {{ENV_PATH}}/helper.sh with "withdraw-reserve-assets-from-asset-hub-westend-local 3000000000000" within 120 seconds -# check that //Alice received the wROC on Rococo AH +# check that //Alice received at least 2.8 wROC on Rococo AH # (we wait until //Alice account increases here - there are no other transactions that may increase it) -asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" within 300 seconds +asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,2800000000000" within 300 seconds diff --git a/bridges/testing/tests/0001-asset-transfer/wwnd-reaches-westend.zndsl b/bridges/testing/tests/0001-asset-transfer/wwnd-reaches-westend.zndsl index 9d893c8d90a80df97b33b7ff16297700c7a8253a..1a8a161819542e281094aed0681d52167aaea8e6 100644 --- a/bridges/testing/tests/0001-asset-transfer/wwnd-reaches-westend.zndsl +++ b/bridges/testing/tests/0001-asset-transfer/wwnd-reaches-westend.zndsl @@ -2,9 +2,9 @@ Description: User is able to transfer ROC from Rococo Asset Hub to Westend Asset Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml Creds: config -# send wWND back to Alice from Rococo AH to Westend AH -asset-hub-westend-collator1: run {{ENV_PATH}}/helper.sh with "withdraw-reserve-assets-from-asset-hub-rococo-local" within 120 seconds +# send 3 wWND back to Alice from Rococo AH to Westend AH +asset-hub-westend-collator1: run {{ENV_PATH}}/helper.sh with "withdraw-reserve-assets-from-asset-hub-rococo-local 3000000000000" within 120 seconds -# check that //Alice received the wWND on Westend AH +# check that //Alice received at least 2.8 wWND on Westend AH # (we wait until //Alice account increases here - there are no other transactions that may increase it) -asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" within 300 seconds +asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,2800000000000" within 300 seconds diff --git a/cumulus/client/consensus/aura/src/lib.rs b/cumulus/client/consensus/aura/src/lib.rs index 8e4bc658e44bfdc173f3fdb66518da883edcb05b..ed6f5bdd4d6984350c5f59a3753618c3a038f323 100644 --- a/cumulus/client/consensus/aura/src/lib.rs +++ b/cumulus/client/consensus/aura/src/lib.rs @@ -54,7 +54,10 @@ use std::{ mod import_queue; pub use import_queue::{build_verifier, import_queue, BuildVerifierParams, ImportQueueParams}; -pub use 
sc_consensus_aura::{slot_duration, AuraVerifier, BuildAuraWorkerParams, SlotProportion}; +pub use sc_consensus_aura::{ + slot_duration, standalone::slot_duration_at, AuraVerifier, BuildAuraWorkerParams, + SlotProportion, +}; pub use sc_consensus_slots::InherentDataProviderExt; pub mod collator; diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml index 0b1e001e007a493f521f10c2659653699da04d0c..98240c92adab38ea103f14699e1e49f7449b3d24 100644 --- a/cumulus/client/relay-chain-minimal-node/Cargo.toml +++ b/cumulus/client/relay-chain-minimal-node/Cargo.toml @@ -24,6 +24,7 @@ polkadot-node-collation-generation = { path = "../../../polkadot/node/collation- polkadot-node-core-runtime-api = { path = "../../../polkadot/node/core/runtime-api" } polkadot-node-core-chain-api = { path = "../../../polkadot/node/core/chain-api" } polkadot-node-core-prospective-parachains = { path = "../../../polkadot/node/core/prospective-parachains" } +polkadot-service = { path = "../../../polkadot/node/service" } # substrate deps sc-authority-discovery = { path = "../../../substrate/client/authority-discovery" } diff --git a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs index 5f5bf338ef9907756adb1eab3f0541e870677fe5..f01ef8b05ecba77cf6458fdfe80e966b3a67e6a3 100644 --- a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs +++ b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs @@ -15,159 +15,29 @@ // along with Polkadot. If not, see . use futures::{select, StreamExt}; -use parking_lot::Mutex; -use std::{collections::HashMap, sync::Arc}; +use std::sync::Arc; -use polkadot_availability_recovery::AvailabilityRecoverySubsystem; -use polkadot_collator_protocol::{CollatorProtocolSubsystem, ProtocolSide}; -use polkadot_network_bridge::{ - Metrics as NetworkBridgeMetrics, NetworkBridgeRx as NetworkBridgeRxSubsystem, - NetworkBridgeTx as NetworkBridgeTxSubsystem, -}; -use polkadot_node_collation_generation::CollationGenerationSubsystem; -use polkadot_node_core_chain_api::ChainApiSubsystem; -use polkadot_node_core_prospective_parachains::ProspectiveParachainsSubsystem; -use polkadot_node_core_runtime_api::RuntimeApiSubsystem; -use polkadot_node_network_protocol::{ - peer_set::{PeerSet, PeerSetProtocolNames}, - request_response::{ - v1::{self, AvailableDataFetchingRequest}, - v2, IncomingRequestReceiver, ReqProtocolNames, - }, -}; -use polkadot_node_subsystem_util::metrics::{prometheus::Registry, Metrics}; use polkadot_overseer::{ - BlockInfo, DummySubsystem, Handle, Overseer, OverseerConnector, OverseerHandle, SpawnGlue, - UnpinHandle, + BlockInfo, Handle, Overseer, OverseerConnector, OverseerHandle, SpawnGlue, UnpinHandle, }; -use polkadot_primitives::CollatorPair; +use polkadot_service::overseer::{collator_overseer_builder, OverseerGenArgs}; -use sc_authority_discovery::Service as AuthorityDiscoveryService; -use sc_network::{NetworkStateInfo, NotificationService}; use sc_service::TaskManager; use sc_utils::mpsc::tracing_unbounded; -use cumulus_primitives_core::relay_chain::{Block, Hash as PHash}; use cumulus_relay_chain_interface::RelayChainError; use crate::BlockChainRpcClient; -/// Arguments passed for overseer construction. -pub(crate) struct CollatorOverseerGenArgs<'a> { - /// Runtime client generic, providing the `ProvieRuntimeApi` trait besides others. - pub runtime_client: Arc, - /// Underlying network service implementation. 
- pub network_service: Arc>, - /// Syncing oracle. - pub sync_oracle: Box, - /// Underlying authority discovery service. - pub authority_discovery_service: AuthorityDiscoveryService, - /// Receiver for collation request protocol v1. - pub collation_req_receiver_v1: IncomingRequestReceiver, - /// Receiver for collation request protocol v2. - pub collation_req_receiver_v2: IncomingRequestReceiver, - /// Receiver for availability request protocol - pub available_data_req_receiver: IncomingRequestReceiver, - /// Prometheus registry, commonly used for production systems, less so for test. - pub registry: Option<&'a Registry>, - /// Task spawner to be used throughout the overseer and the APIs it provides. - pub spawner: sc_service::SpawnTaskHandle, - /// Determines the behavior of the collator. - pub collator_pair: CollatorPair, - /// Request response protocols - pub req_protocol_names: ReqProtocolNames, - /// Peerset protocols name mapping - pub peer_set_protocol_names: PeerSetProtocolNames, - /// Notification services for validation/collation protocols. - pub notification_services: HashMap>, -} - fn build_overseer( connector: OverseerConnector, - CollatorOverseerGenArgs { - runtime_client, - network_service, - sync_oracle, - authority_discovery_service, - collation_req_receiver_v1, - collation_req_receiver_v2, - available_data_req_receiver, - registry, - spawner, - collator_pair, - req_protocol_names, - peer_set_protocol_names, - notification_services, - }: CollatorOverseerGenArgs<'_>, + args: OverseerGenArgs, ) -> Result< (Overseer, Arc>, OverseerHandle), RelayChainError, > { - let spawner = SpawnGlue(spawner); - let network_bridge_metrics: NetworkBridgeMetrics = Metrics::register(registry)?; - let notification_sinks = Arc::new(Mutex::new(HashMap::new())); - - let builder = Overseer::builder() - .availability_distribution(DummySubsystem) - .availability_recovery(AvailabilityRecoverySubsystem::for_collator( - available_data_req_receiver, - Metrics::register(registry)?, - )) - .availability_store(DummySubsystem) - .bitfield_distribution(DummySubsystem) - .bitfield_signing(DummySubsystem) - .candidate_backing(DummySubsystem) - .candidate_validation(DummySubsystem) - .pvf_checker(DummySubsystem) - .chain_api(ChainApiSubsystem::new(runtime_client.clone(), Metrics::register(registry)?)) - .collation_generation(CollationGenerationSubsystem::new(Metrics::register(registry)?)) - .collator_protocol({ - let side = ProtocolSide::Collator { - peer_id: network_service.local_peer_id(), - collator_pair, - request_receiver_v1: collation_req_receiver_v1, - request_receiver_v2: collation_req_receiver_v2, - metrics: Metrics::register(registry)?, - }; - CollatorProtocolSubsystem::new(side) - }) - .network_bridge_rx(NetworkBridgeRxSubsystem::new( - network_service.clone(), - authority_discovery_service.clone(), - sync_oracle, - network_bridge_metrics.clone(), - peer_set_protocol_names.clone(), - notification_services, - notification_sinks.clone(), - )) - .network_bridge_tx(NetworkBridgeTxSubsystem::new( - network_service, - authority_discovery_service, - network_bridge_metrics, - req_protocol_names, - peer_set_protocol_names, - notification_sinks, - )) - .provisioner(DummySubsystem) - .runtime_api(RuntimeApiSubsystem::new( - runtime_client.clone(), - Metrics::register(registry)?, - spawner.clone(), - )) - .statement_distribution(DummySubsystem) - .prospective_parachains(ProspectiveParachainsSubsystem::new(Metrics::register(registry)?)) - .approval_distribution(DummySubsystem) - .approval_voting(DummySubsystem) - 
.gossip_support(DummySubsystem) - .dispute_coordinator(DummySubsystem) - .dispute_distribution(DummySubsystem) - .chain_selection(DummySubsystem) - .activation_external_listeners(Default::default()) - .span_per_active_leaf(Default::default()) - .active_leaves(Default::default()) - .supports_parachains(runtime_client) - .metrics(Metrics::register(registry)?) - .spawner(spawner); + let builder = + collator_overseer_builder(args).map_err(|e| RelayChainError::Application(e.into()))?; builder .build_with_connector(connector) @@ -175,7 +45,7 @@ fn build_overseer( } pub(crate) fn spawn_overseer( - overseer_args: CollatorOverseerGenArgs, + overseer_args: OverseerGenArgs, task_manager: &TaskManager, relay_chain_rpc_client: Arc, ) -> Result { diff --git a/cumulus/client/relay-chain-minimal-node/src/lib.rs b/cumulus/client/relay-chain-minimal-node/src/lib.rs index d121d2d3356765d9327fdaa0a8c0563c3917266f..4bccca59fe3ea2eec816513778885f43c9504d51 100644 --- a/cumulus/client/relay-chain-minimal-node/src/lib.rs +++ b/cumulus/client/relay-chain-minimal-node/src/lib.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use collator_overseer::{CollatorOverseerGenArgs, NewMinimalNode}; +use collator_overseer::NewMinimalNode; use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface, RelayChainResult}; use cumulus_relay_chain_rpc_interface::{RelayChainRpcClient, RelayChainRpcInterface, Url}; @@ -29,6 +29,7 @@ use polkadot_node_network_protocol::{ use polkadot_node_subsystem_util::metrics::prometheus::Registry; use polkadot_primitives::CollatorPair; +use polkadot_service::{overseer::OverseerGenArgs, IsParachainNode}; use sc_authority_discovery::Service as AuthorityDiscoveryService; use sc_network::{config::FullNetworkConfiguration, Event, NetworkEventStream, NetworkService}; @@ -172,10 +173,10 @@ async fn new_minimal_relay_chain( } let genesis_hash = relay_chain_rpc_client.block_get_hash(Some(0)).await?.unwrap_or_default(); - let peer_set_protocol_names = + let peerset_protocol_names = PeerSetProtocolNames::new(genesis_hash, config.chain_spec.fork_id()); let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No }; - let notification_services = peer_sets_info(is_authority, &peer_set_protocol_names) + let notification_services = peer_sets_info(is_authority, &peerset_protocol_names) .into_iter() .map(|(config, (peerset, service))| { net_config.add_notification_protocol(config); @@ -184,14 +185,14 @@ async fn new_minimal_relay_chain( .collect::>>(); let request_protocol_names = ReqProtocolNames::new(genesis_hash, config.chain_spec.fork_id()); - let (collation_req_receiver_v1, collation_req_receiver_v2, available_data_req_receiver) = + let (collation_req_v1_receiver, collation_req_v2_receiver, available_data_req_receiver) = build_request_response_protocol_receivers(&request_protocol_names, &mut net_config); let best_header = relay_chain_rpc_client .chain_get_header(None) .await? 
.ok_or_else(|| RelayChainError::RpcCallError("Unable to fetch best header".to_string()))?; - let (network, network_starter, sync_oracle) = build_collator_network( + let (network, network_starter, sync_service) = build_collator_network( &config, net_config, task_manager.spawn_handle(), @@ -208,19 +209,20 @@ async fn new_minimal_relay_chain( prometheus_registry.cloned(), ); - let overseer_args = CollatorOverseerGenArgs { + let overseer_args = OverseerGenArgs { runtime_client: relay_chain_rpc_client.clone(), network_service: network, - sync_oracle, + sync_service, authority_discovery_service, - collation_req_receiver_v1, - collation_req_receiver_v2, + collation_req_v1_receiver, + collation_req_v2_receiver, available_data_req_receiver, registry: prometheus_registry, spawner: task_manager.spawn_handle(), - collator_pair, + is_parachain_node: IsParachainNode::Collator(collator_pair), + overseer_message_channel_capacity_override: None, req_protocol_names: request_protocol_names, - peer_set_protocol_names, + peerset_protocol_names, notification_services, }; @@ -240,10 +242,10 @@ fn build_request_response_protocol_receivers( IncomingRequestReceiver, IncomingRequestReceiver, ) { - let (collation_req_receiver_v1, cfg) = + let (collation_req_v1_receiver, cfg) = IncomingRequest::get_config_receiver(request_protocol_names); config.add_request_response_protocol(cfg); - let (collation_req_receiver_v2, cfg) = + let (collation_req_v2_receiver, cfg) = IncomingRequest::get_config_receiver(request_protocol_names); config.add_request_response_protocol(cfg); let (available_data_req_receiver, cfg) = @@ -251,5 +253,5 @@ fn build_request_response_protocol_receivers( config.add_request_response_protocol(cfg); let cfg = Protocol::ChunkFetchingV1.get_outbound_only_config(request_protocol_names); config.add_request_response_protocol(cfg); - (collation_req_receiver_v1, collation_req_receiver_v2, available_data_req_receiver) + (collation_req_v1_receiver, collation_req_v2_receiver, available_data_req_receiver) } diff --git a/cumulus/client/relay-chain-minimal-node/src/network.rs b/cumulus/client/relay-chain-minimal-node/src/network.rs index 95785063c1aeb6649d7154fa39e4e111e226def3..7286fab7907cb6d475b81a4dfd97c5d001180873 100644 --- a/cumulus/client/relay-chain-minimal-node/src/network.rs +++ b/cumulus/client/relay-chain-minimal-node/src/network.rs @@ -40,7 +40,11 @@ pub(crate) fn build_collator_network( genesis_hash: Hash, best_header: Header, ) -> Result< - (Arc>, NetworkStarter, Box), + ( + Arc>, + NetworkStarter, + Arc, + ), Error, > { let protocol_id = config.protocol_id(); @@ -112,7 +116,7 @@ pub(crate) fn build_collator_network( let network_starter = NetworkStarter::new(network_start_tx); - Ok((network_service, network_starter, Box::new(SyncOracle {}))) + Ok((network_service, network_starter, Arc::new(SyncOracle {}))) } fn adjust_network_config_light_in_peers(config: &mut NetworkConfiguration) { diff --git a/cumulus/pallets/collator-selection/src/lib.rs b/cumulus/pallets/collator-selection/src/lib.rs index 7449f4d68c7eacc8b07fa45f2f991c13f3d5713b..fb4c4a445df296b3650901326db9edccbbbd61cf 100644 --- a/cumulus/pallets/collator-selection/src/lib.rs +++ b/cumulus/pallets/collator-selection/src/lib.rs @@ -118,7 +118,7 @@ pub mod pallet { use sp_staking::SessionIndex; use sp_std::vec::Vec; - /// The current storage version. + /// The in-code storage version. 
const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); type BalanceOf = diff --git a/cumulus/pallets/collator-selection/src/migration.rs b/cumulus/pallets/collator-selection/src/migration.rs index 58b4cc5b06a1ab7da17b31ad3c457fd992c59136..f384981dbae8e034a4465ac0f226fb4422be8089 100644 --- a/cumulus/pallets/collator-selection/src/migration.rs +++ b/cumulus/pallets/collator-selection/src/migration.rs @@ -31,8 +31,8 @@ pub mod v1 { pub struct MigrateToV1(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { - let onchain_version = Pallet::::on_chain_storage_version(); - if onchain_version == 0 { + let on_chain_version = Pallet::::on_chain_storage_version(); + if on_chain_version == 0 { let invulnerables_len = Invulnerables::::get().to_vec().len(); >::mutate(|invulnerables| { invulnerables.sort(); @@ -45,7 +45,7 @@ pub mod v1 { invulnerables_len, ); // Similar complexity to `set_invulnerables` (put storage value) - // Plus 1 read for length, 1 read for `onchain_version`, 1 write to put version + // Plus 1 read for length, 1 read for `on_chain_version`, 1 write to put version T::WeightInfo::set_invulnerables(invulnerables_len as u32) .saturating_add(T::DbWeight::get().reads_writes(2, 1)) } else { @@ -83,8 +83,8 @@ pub mod v1 { "after migration, there should be the same number of invulnerables" ); - let onchain_version = Pallet::::on_chain_storage_version(); - frame_support::ensure!(onchain_version >= 1, "must_upgrade"); + let on_chain_version = Pallet::::on_chain_storage_version(); + frame_support::ensure!(on_chain_version >= 1, "must_upgrade"); Ok(()) } diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index d86a67e58f44713dfe7b1c9f5c0952c16d590986..81965a2a313875fd8783cf9e22ca3dd652f688c3 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -205,6 +205,7 @@ pub mod pallet { type OnSystemEvent: OnSystemEvent; /// Returns the parachain ID we are running with. + #[pallet::constant] type SelfParaId: Get; /// The place where outbound XCMP messages come from. This is queried in `finalize_block`. @@ -840,6 +841,7 @@ pub mod pallet { /// /// This data is also absent from the genesis. #[pallet::storage] + #[pallet::disable_try_decode_storage] #[pallet::getter(fn host_configuration)] pub(super) type HostConfiguration = StorageValue<_, AbridgedHostConfiguration>; diff --git a/cumulus/pallets/parachain-system/src/migration.rs b/cumulus/pallets/parachain-system/src/migration.rs index a92f85b9cd420e9e1027d1f82c643d90fea8b97c..30106aceab5a442c34360563fdfb096fdc6ac9b1 100644 --- a/cumulus/pallets/parachain-system/src/migration.rs +++ b/cumulus/pallets/parachain-system/src/migration.rs @@ -21,7 +21,7 @@ use frame_support::{ weights::Weight, }; -/// The current storage version. +/// The in-code storage version. pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); /// Migrates the pallet storage to the most recent version. diff --git a/cumulus/pallets/xcmp-queue/src/lib.rs b/cumulus/pallets/xcmp-queue/src/lib.rs index 5b900769622afa3cba2c47f493ebff1a279b182b..e92169be16b0b8466582f3dd143d9e3e1807a42d 100644 --- a/cumulus/pallets/xcmp-queue/src/lib.rs +++ b/cumulus/pallets/xcmp-queue/src/lib.rs @@ -600,7 +600,7 @@ impl Pallet { let QueueConfigData { drop_threshold, .. 
} = >::get(); let fp = T::XcmpQueue::footprint(sender); // Assume that it will not fit into the current page: - let new_pages = fp.pages.saturating_add(1); + let new_pages = fp.ready_pages.saturating_add(1); if new_pages > drop_threshold { // This should not happen since the channel should have been suspended in // [`on_queue_changed`]. @@ -663,12 +663,12 @@ impl OnQueueChanged for Pallet { let mut suspended_channels = >::get(); let suspended = suspended_channels.contains(¶); - if suspended && fp.pages <= resume_threshold { + if suspended && fp.ready_pages <= resume_threshold { Self::send_signal(para, ChannelSignal::Resume); suspended_channels.remove(¶); >::put(suspended_channels); - } else if !suspended && fp.pages >= suspend_threshold { + } else if !suspended && fp.ready_pages >= suspend_threshold { log::warn!("XCMP queue for sibling {:?} is full; suspending channel.", para); Self::send_signal(para, ChannelSignal::Suspend); diff --git a/cumulus/pallets/xcmp-queue/src/migration.rs b/cumulus/pallets/xcmp-queue/src/migration.rs index 6c86c3011d23807adfbde801ec6865b6731822df..c7fa61a3e3f0513e53d67fcee4efd6c3cecf29f6 100644 --- a/cumulus/pallets/xcmp-queue/src/migration.rs +++ b/cumulus/pallets/xcmp-queue/src/migration.rs @@ -24,7 +24,7 @@ use frame_support::{ weights::{constants::WEIGHT_REF_TIME_PER_MILLIS, Weight}, }; -/// The current storage version. +/// The in-code storage version. pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); pub const LOG: &str = "runtime::xcmp-queue-migration"; diff --git a/cumulus/pallets/xcmp-queue/src/mock.rs b/cumulus/pallets/xcmp-queue/src/mock.rs index 08ab58ce816046336fcba0a2318bd2ff77227e5f..1fb88cafd93c425833d38c0904f59490cc479294 100644 --- a/cumulus/pallets/xcmp-queue/src/mock.rs +++ b/cumulus/pallets/xcmp-queue/src/mock.rs @@ -247,6 +247,7 @@ impl> EnqueueMessage for EnqueueToLocalStorage } } footprint.pages = footprint.storage.size as u32 / 16; // Number does not matter + footprint.ready_pages = footprint.pages; footprint } } diff --git a/cumulus/parachain-template/pallets/template/README.md b/cumulus/parachain-template/pallets/template/README.md deleted file mode 100644 index 5a6461233465c327d233fc5d97bdb3a6a85f8fd9..0000000000000000000000000000000000000000 --- a/cumulus/parachain-template/pallets/template/README.md +++ /dev/null @@ -1 +0,0 @@ -License: Unlicense diff --git a/cumulus/parachain-template/pallets/template/src/benchmarking.rs b/cumulus/parachain-template/pallets/template/src/benchmarking.rs deleted file mode 100644 index 8bba2a09867dea1d55e487661c01fd72acdf4dc9..0000000000000000000000000000000000000000 --- a/cumulus/parachain-template/pallets/template/src/benchmarking.rs +++ /dev/null @@ -1,20 +0,0 @@ -//! Benchmarking setup for pallet-parachain-template - -use super::*; - -#[allow(unused)] -use crate::Pallet as Template; -use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller}; -use frame_system::RawOrigin; - -benchmarks! { - do_something { - let s in 0 .. 
100; - let caller: T::AccountId = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), s) - verify { - assert_eq!(Something::::get(), Some(s)); - } -} - -impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test,); diff --git a/cumulus/parachains/chain-specs/asset-hub-kusama.json b/cumulus/parachains/chain-specs/asset-hub-kusama.json index fba74b17f9607f58bdb17d7a0b05c8b764a9c4e5..654275eade81e5379004e649e11eb7e4acb2ec1f 100644 --- a/cumulus/parachains/chain-specs/asset-hub-kusama.json +++ b/cumulus/parachains/chain-specs/asset-hub-kusama.json @@ -25,7 +25,9 @@ "/dns/statemine-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWCKUrE5uaXQ288ko3Ex3zCyozyJLG47KEYTopinnXNtYL", "/dns/mine14.rotko.net/tcp/33524/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu", "/dns/mine14.rotko.net/tcp/34524/ws/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu", - "/dns/mine14.rotko.net/tcp/35524/wss/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu" + "/dns/mine14.rotko.net/tcp/35524/wss/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu", + "/dns/asset-hub-kusama.bootnodes.polkadotters.com/tcp/30511/p2p/12D3KooWDpk7wVH7RgjErEvbvAZ2kY5VeaAwRJP5ojmn1e8b8UbU", + "/dns/asset-hub-kusama.bootnodes.polkadotters.com/tcp/30513/wss/p2p/12D3KooWDpk7wVH7RgjErEvbvAZ2kY5VeaAwRJP5ojmn1e8b8UbU" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/asset-hub-polkadot.json b/cumulus/parachains/chain-specs/asset-hub-polkadot.json index 685a00ddc7145ed650f7cb5496fe81cfb23f0ffb..454060d2a87afb84407323263eea97d2f12d5a68 100644 --- a/cumulus/parachains/chain-specs/asset-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/asset-hub-polkadot.json @@ -25,7 +25,9 @@ "/dns/statemint-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWLKxHom7f3XawRJqrF8RwiKK5Sj3qZqz5c7hF6eJeXhTx", "/dns/mint14.rotko.net/tcp/33514/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW", "/dns/mint14.rotko.net/tcp/34514/ws/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW", - "/dns/mint14.rotko.net/tcp/35514/wss/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW" + "/dns/mint14.rotko.net/tcp/35514/wss/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW", + "/dns/asset-hub-polkadot.bootnodes.polkadotters.com/tcp/30508/p2p/12D3KooWKbfY9a9oywxMJKiALmt7yhrdQkjXMtvxhhDDN23vG93R", + "/dns/asset-hub-polkadot.bootnodes.polkadotters.com/tcp/30510/wss/p2p/12D3KooWKbfY9a9oywxMJKiALmt7yhrdQkjXMtvxhhDDN23vG93R" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/asset-hub-westend.json b/cumulus/parachains/chain-specs/asset-hub-westend.json index 6f42b5f7d8bb4a76c7390b538a68b1bb0acc1613..670935c9d2474242307d691d8707030c70f49982 100644 --- a/cumulus/parachains/chain-specs/asset-hub-westend.json +++ b/cumulus/parachains/chain-specs/asset-hub-westend.json @@ -23,7 +23,9 @@ "/dns/westmint-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWDoq4PVdWm5nzRSvEz3DSSKjVgRhWVUaKyi5JMKwJKYbk", "/dns/wmint14.rotko.net/tcp/33534/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN", "/dns/wmint14.rotko.net/tcp/34534/ws/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN", - "/dns/wmint14.rotko.net/tcp/35534/wss/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN" + "/dns/wmint14.rotko.net/tcp/35534/wss/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN", + "/dns/asset-hub-westend.bootnodes.polkadotters.com/tcp/30514/p2p/12D3KooWNFYysCqmojxqjjaTfD2VkWBNngfyUKWjcR4WFixfHNTk", + 
"/dns/asset-hub-westend.bootnodes.polkadotters.com/tcp/30516/wss/p2p/12D3KooWNFYysCqmojxqjjaTfD2VkWBNngfyUKWjcR4WFixfHNTk" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-kusama.json b/cumulus/parachains/chain-specs/bridge-hub-kusama.json index 0ef81806cc5c08621f3c0a308425c72f836c3ef4..90b70b05016d52aeecaea4870f438bf980f7263b 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-kusama.json +++ b/cumulus/parachains/chain-specs/bridge-hub-kusama.json @@ -25,7 +25,9 @@ "/dns/bridgehub-kusama-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWQMWofXj8v3RroDNnrhv1iURqm8vnaG98AdGnCn2YoDcW", "/dns/kbr13.rotko.net/tcp/33553/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66", "/dns/kbr13.rotko.net/tcp/34553/ws/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66", - "/dns/kbr13.rotko.net/tcp/35553/wss/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66" + "/dns/kbr13.rotko.net/tcp/35553/wss/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66", + "/dns/bridge-hub-kusama.bootnodes.polkadotters.com/tcp/30520/p2p/12D3KooWH3pucezRRS5esoYyzZsUkKWcPSByQxEvmM819QL1HPLV", + "/dns/bridge-hub-kusama.bootnodes.polkadotters.com/tcp/30522/wss/p2p/12D3KooWH3pucezRRS5esoYyzZsUkKWcPSByQxEvmM819QL1HPLV" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json index 130bdf31ef211ba6d353fa97944b2bf80320997d..a9444b89e1e2cce9c79a367c4c561e910f1f28f7 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json @@ -21,7 +21,9 @@ "/dns/bridgehub-polkadot-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWPNZm78tWUmKbta3SXdkqTPsquRc8ekEbJjZsGGi7YiRi", "/dns/pbr13.rotko.net/tcp/33543/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw", "/dns/pbr13.rotko.net/tcp/34543/ws/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw", - "/dns/pbr13.rotko.net/tcp/35543/wss/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw" + "/dns/pbr13.rotko.net/tcp/35543/wss/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw", + "/dns/bridge-hub-polkadot.bootnodes.polkadotters.com/tcp/30517/p2p/12D3KooWLUNE3LHPDa1WrrZaYT7ArK66CLM1bPv7kKz74UcLnQRB", + "/dns/bridge-hub-polkadot.bootnodes.polkadotters.com/tcp/30519/wss/p2p/12D3KooWLUNE3LHPDa1WrrZaYT7ArK66CLM1bPv7kKz74UcLnQRB" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-westend.json b/cumulus/parachains/chain-specs/bridge-hub-westend.json index 018ab0ee6fd9810595c841237dd253b9463aed79..447207a58107a95bcd5fca173d9d5476a0ce9cab 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-westend.json +++ b/cumulus/parachains/chain-specs/bridge-hub-westend.json @@ -19,7 +19,9 @@ "/dns/bridgehub-westend-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWBsBArCMxmQyo3feCEqMWuwyhb2LTRK8hmCCJxgrNeMke", "/dns/wbr13.rotko.net/tcp/33563/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD", "/dns/wbr13.rotko.net/tcp/34563/ws/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD", - "/dns/wbr13.rotko.net/tcp/35563/wss/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD" + "/dns/wbr13.rotko.net/tcp/35563/wss/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD", + "/dns/bridge-hub-westend.bootnodes.polkadotters.com/tcp/30523/p2p/12D3KooWPkwgJofp4GeeRwNgXqkp2aFwdLkCWv3qodpBJLwK43Jj", + 
"/dns/bridge-hub-westend.bootnodes.polkadotters.com/tcp/30525/wss/p2p/12D3KooWPkwgJofp4GeeRwNgXqkp2aFwdLkCWv3qodpBJLwK43Jj" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/collectives-polkadot.json b/cumulus/parachains/chain-specs/collectives-polkadot.json index e9f690234e4381f54377c7fe7174f25834a973a5..259669cf37a0643534eaa5d1776daf79ec402d75 100644 --- a/cumulus/parachains/chain-specs/collectives-polkadot.json +++ b/cumulus/parachains/chain-specs/collectives-polkadot.json @@ -25,7 +25,9 @@ "/dns/collectives-polkadot-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWDumvnNwPbBg5inBEapgjKU7ECdMHHgwfYeGWUkzYUE1c", "/dns/pch13.rotko.net/tcp/33573/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", "/dns/pch13.rotko.net/tcp/34573/ws/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", - "/dns/pch13.rotko.net/tcp/35573/wss/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH" + "/dns/pch13.rotko.net/tcp/35573/wss/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", + "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30526/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff", + "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30528/wss/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/collectives-westend.json b/cumulus/parachains/chain-specs/collectives-westend.json index ffe73b5a05a391cc1c5396c235460830c023297a..e459c631f8be9df7c5c52993c116f11ef619fefe 100644 --- a/cumulus/parachains/chain-specs/collectives-westend.json +++ b/cumulus/parachains/chain-specs/collectives-westend.json @@ -25,7 +25,9 @@ "/dns/westend-collectives-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWPFM93jgm4pgxx8PM8WJKAJF49qia8jRB95uciUQwYh7m", "/dns/wch13.rotko.net/tcp/33593/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr", "/dns/wch13.rotko.net/tcp/34593/ws/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr", - "/dns/wch13.rotko.net/tcp/35593/wss/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr" + "/dns/wch13.rotko.net/tcp/35593/wss/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr", + "/dns/collectives-westend.bootnodes.polkadotters.com/tcp/30529/p2p/12D3KooWAFkXNSBfyPduZVgfS7pj5NuVpbU8Ee5gHeF8wvos7Yqn", + "/dns/collectives-westend.bootnodes.polkadotters.com/tcp/30531/wss/p2p/12D3KooWAFkXNSBfyPduZVgfS7pj5NuVpbU8Ee5gHeF8wvos7Yqn" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/people-westend.json b/cumulus/parachains/chain-specs/people-westend.json index fa29853c70b05e47df50445935be42a4637f240d..29fa0c9cde79c0daecbce029c8fa377ba1ff6918 100644 --- a/cumulus/parachains/chain-specs/people-westend.json +++ b/cumulus/parachains/chain-specs/people-westend.json @@ -10,7 +10,9 @@ "/dns/westend-people-collator-node-2.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWGVYTVKW7tYe51JvetvGvVLDPXzqQX1mueJgz14FgkmHG", "/dns/westend-people-collator-node-2.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWGVYTVKW7tYe51JvetvGvVLDPXzqQX1mueJgz14FgkmHG", "/dns/westend-people-collator-node-3.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWCF1eA2Gap69zgXD7Df3e9DqDUsGoByocggTGejoHjK23", - "/dns/westend-people-collator-node-3.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWCF1eA2Gap69zgXD7Df3e9DqDUsGoByocggTGejoHjK23" + "/dns/westend-people-collator-node-3.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWCF1eA2Gap69zgXD7Df3e9DqDUsGoByocggTGejoHjK23", + 
"/dns/identity-westend.bootnodes.polkadotters.com/tcp/30532/p2p/12D3KooWKr9San6KTM7REJ95cBaDoiciGcWnW8TTftEJgxGF5Ehb", + "/dns/identity-westend.bootnodes.polkadotters.com/tcp/30534/wss/p2p/12D3KooWKr9San6KTM7REJ95cBaDoiciGcWnW8TTftEJgxGF5Ehb" ], "telemetryEndpoints": null, "protocolId": null, @@ -79,4 +81,4 @@ "childrenDefault": {} } } -} \ No newline at end of file +} diff --git a/cumulus/parachains/pallets/collective-content/src/lib.rs b/cumulus/parachains/pallets/collective-content/src/lib.rs index 7a685858accb67868837e734ba9808741a4f7ea1..b1c960ad6a0d337d6b84aaaba59f7fa0625a8124 100644 --- a/cumulus/parachains/pallets/collective-content/src/lib.rs +++ b/cumulus/parachains/pallets/collective-content/src/lib.rs @@ -59,7 +59,7 @@ pub mod pallet { use frame_system::pallet_prelude::*; use sp_runtime::{traits::BadOrigin, Saturating}; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(0); #[pallet::pallet] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index 05936e93993231429d738edf15acc48150bcc542..3eb63a24b74e9eee4ded63cc591ec6ba8adb136b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -119,6 +119,7 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-asset-conversion-tx-payment/runtime-benchmarks", "pallet-asset-conversion/runtime-benchmarks", "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", @@ -130,6 +131,7 @@ runtime-benchmarks = [ "pallet-proxy/runtime-benchmarks", "pallet-state-trie-migration/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-uniques/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 81bb66a56a29a6d4f0669234aadfcf1b55157275..32966ab6341d7bf4b2efae0cf008cfa6dfb67275 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -113,7 +113,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), impl_name: create_runtime_str!("statemine"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 14, @@ -126,7 +126,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), impl_name: create_runtime_str!("statemine"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 14, @@ -178,6 +178,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = SS58Prefix; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = frame_support::traits::ConstU32<16>; @@ -234,6 +235,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee 
= ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -761,6 +763,9 @@ impl pallet_asset_conversion_tx_payment::Config for Runtime { type Fungibles = LocalAndForeignAssets; type OnChargeAssetTransaction = AssetConversionAdapter; + type WeightInfo = weights::pallet_asset_conversion_tx_payment::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = AssetConversionTxHelper; } parameter_types! { @@ -948,8 +953,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -961,7 +966,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( pallet_collator_selection::migration::v1::MigrateToV1, @@ -986,37 +991,37 @@ impl frame_support::traits::OnRuntimeUpgrade for InitStorageVersions { let mut writes = 0; if PolkadotXcm::on_chain_storage_version() == StorageVersion::new(0) { - PolkadotXcm::current_storage_version().put::(); + PolkadotXcm::in_code_storage_version().put::(); writes.saturating_inc(); } if Multisig::on_chain_storage_version() == StorageVersion::new(0) { - Multisig::current_storage_version().put::(); + Multisig::in_code_storage_version().put::(); writes.saturating_inc(); } if Assets::on_chain_storage_version() == StorageVersion::new(0) { - Assets::current_storage_version().put::(); + Assets::in_code_storage_version().put::(); writes.saturating_inc(); } if Uniques::on_chain_storage_version() == StorageVersion::new(0) { - Uniques::current_storage_version().put::(); + Uniques::in_code_storage_version().put::(); writes.saturating_inc(); } if Nfts::on_chain_storage_version() == StorageVersion::new(0) { - Nfts::current_storage_version().put::(); + Nfts::in_code_storage_version().put::(); writes.saturating_inc(); } if ForeignAssets::on_chain_storage_version() == StorageVersion::new(0) { - ForeignAssets::current_storage_version().put::(); + ForeignAssets::in_code_storage_version().put::(); writes.saturating_inc(); } if PoolAssets::on_chain_storage_version() == StorageVersion::new(0) { - PoolAssets::current_storage_version().put::(); + PoolAssets::in_code_storage_version().put::(); writes.saturating_inc(); } @@ -1034,14 +1039,77 @@ pub type Executive = frame_executive::Executive< Migrations, >; +#[cfg(feature = "runtime-benchmarks")] +pub struct AssetConversionTxHelper; + +#[cfg(feature = "runtime-benchmarks")] +impl + pallet_asset_conversion_tx_payment::BenchmarkHelperTrait< + AccountId, + xcm::v3::MultiLocation, + xcm::v3::MultiLocation, + > for AssetConversionTxHelper +{ + fn create_asset_id_parameter(seed: u32) -> (xcm::v3::MultiLocation, xcm::v3::MultiLocation) { + // Use a different parachain's foreign assets pallet so that the asset is indeed foreign.
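+ // Concretely: parents = 1 with X3(Parachain(3000), PalletInstance(53), GeneralIndex(seed)), i.e.
+ // an asset registered under a sibling chain's pallet; the same id is returned for both elements of the pair.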
+ let asset_id = xcm::v3::MultiLocation::new( + 1, + xcm::v3::Junctions::X3( + xcm::v3::Junction::Parachain(3000), + xcm::v3::Junction::PalletInstance(53), + xcm::v3::Junction::GeneralIndex(seed.into()), + ), + ); + (asset_id, asset_id) + } + + fn setup_balances_and_pool(asset_id: xcm::v3::MultiLocation, account: AccountId) { + use frame_support::{assert_ok, traits::fungibles::Mutate}; + assert_ok!(ForeignAssets::force_create( + RuntimeOrigin::root(), + asset_id.into(), + account.clone().into(), /* owner */ + true, /* is_sufficient */ + 1, + )); + + let lp_provider = account.clone(); + use frame_support::traits::Currency; + let _ = Balances::deposit_creating(&lp_provider, u64::MAX.into()); + assert_ok!(ForeignAssets::mint_into(asset_id.into(), &lp_provider, u64::MAX.into())); + + let token_native = Box::new(TokenLocationV3::get()); + let token_second = Box::new(asset_id); + + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(lp_provider.clone()), + token_native.clone(), + token_second.clone() + )); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(lp_provider.clone()), + token_native, + token_second, + (u32::MAX / 8).into(), // 1 desired + u32::MAX.into(), // 2 desired + 1, // 1 min + 1, // 2 min + lp_provider, + )); + } +} + #[cfg(feature = "runtime-benchmarks")] mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_assets, Local] [pallet_assets, Foreign] [pallet_assets, Pool] [pallet_asset_conversion, AssetConversion] + [pallet_asset_conversion_tx_payment, AssetTxPayment] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] @@ -1052,6 +1120,7 @@ mod benches { [pallet_uniques, Uniques] [pallet_utility, Utility] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] @@ -1093,7 +1162,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -1302,6 +1371,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use pallet_xcm_bridge_hub_router::benchmarking::Pallet as XcmBridgeHubRouterBench; @@ -1336,6 +1406,7 @@ impl_runtime_apis! { use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); @@ -1454,6 +1525,13 @@ impl_runtime_apis! 
{ }); Some((assets, fee_index as u32, dest, verify)) } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } } impl XcmBridgeHubRouterConfig for Runtime { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..ed2c5f3056e3c44601fee945ccda3eab35444e12 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system_extensions.rs @@ -0,0 +1,121 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/ +// --chain=asset-hub-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_637_000 picoseconds. + Weight::from_parts(6_382_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. 
+ Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 561_000 picoseconds. + Weight::from_parts(2_705_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_316_000 picoseconds. + Weight::from_parts(5_771_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 511_000 picoseconds. + Weight::from_parts(2_575_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 501_000 picoseconds. + Weight::from_parts(2_595_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_687_000 picoseconds. + Weight::from_parts(6_192_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs index fa9e86102c619c9ff68316cae2a27a7f79fea2e6..134b5341a401ca0f0918d3f04a79d2a4d1d9622d 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs @@ -19,7 +19,9 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_asset_conversion; +pub mod pallet_asset_conversion_tx_payment; pub mod pallet_assets_foreign; pub mod pallet_assets_local; pub mod pallet_assets_pool; @@ -32,6 +34,7 @@ pub mod pallet_nfts; pub mod pallet_proxy; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_uniques; pub mod pallet_utility; pub mod pallet_xcm; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion_tx_payment.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion_tx_payment.rs new file mode 100644 index 0000000000000000000000000000000000000000..0a639b368af2248bdaf1efa7538d58a935dbe2e0 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion_tx_payment.rs @@ -0,0 +1,92 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_asset_conversion_tx_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2024-01-04, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `Georges-MacBook-Pro.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/debug/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_asset_conversion_tx_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/ +// --chain=asset-hub-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_asset_conversion_tx_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_asset_conversion_tx_payment::WeightInfo for WeightInfo { + fn charge_asset_tx_payment_zero() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_000_000 picoseconds. + Weight::from_parts(10_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_asset_tx_payment_native() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 209_000_000 picoseconds. + Weight::from_parts(212_000_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:2 w:2) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_asset_tx_payment_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `631` + // Estimated: `7404` + // Minimum execution time: 1_228_000_000 picoseconds. 
+ Weight::from_parts(1_268_000_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_transaction_payment.rs new file mode 100644 index 0000000000000000000000000000000000000000..035f9a6dbe5167cb651736f705b817d4ba9027c4 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/ +// --chain=asset-hub-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 33_363_000 picoseconds. 
+ Weight::from_parts(38_793_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs index f8820bbb58cb24afe1afe034e131414368089444..51b6543bae82ba496998706ab6c2aaf6e0ff604b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-r43aesjn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,8 +64,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 25_003_000 picoseconds. - Weight::from_parts(25_800_000, 0) + // Minimum execution time: 22_136_000 picoseconds. + Weight::from_parts(22_518_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -90,8 +90,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 88_832_000 picoseconds. - Weight::from_parts(90_491_000, 0) + // Minimum execution time: 92_277_000 picoseconds. + Weight::from_parts(94_843_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -118,8 +118,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `400` // Estimated: `6196` - // Minimum execution time: 138_911_000 picoseconds. - Weight::from_parts(142_483_000, 0) + // Minimum execution time: 120_110_000 picoseconds. + Weight::from_parts(122_968_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(5)) @@ -148,8 +148,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `496` // Estimated: `6208` - // Minimum execution time: 146_932_000 picoseconds. - Weight::from_parts(153_200_000, 0) + // Minimum execution time: 143_116_000 picoseconds. + Weight::from_parts(147_355_000, 0) .saturating_add(Weight::from_parts(0, 6208)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(7)) @@ -170,8 +170,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_081_000 picoseconds. - Weight::from_parts(7_397_000, 0) + // Minimum execution time: 6_517_000 picoseconds. 
+ Weight::from_parts(6_756_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -181,8 +181,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_007_000 picoseconds. - Weight::from_parts(2_183_000, 0) + // Minimum execution time: 1_894_000 picoseconds. + Weight::from_parts(2_024_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -208,8 +208,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 28_790_000 picoseconds. - Weight::from_parts(29_767_000, 0) + // Minimum execution time: 27_314_000 picoseconds. + Weight::from_parts(28_787_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -234,8 +234,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 30_951_000 picoseconds. - Weight::from_parts(31_804_000, 0) + // Minimum execution time: 29_840_000 picoseconds. + Weight::from_parts(30_589_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -246,45 +246,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_164_000 picoseconds. - Weight::from_parts(2_311_000, 0) + // Minimum execution time: 1_893_000 picoseconds. + Weight::from_parts(2_017_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `162` - // Estimated: `11052` - // Minimum execution time: 16_906_000 picoseconds. - Weight::from_parts(17_612_000, 0) - .saturating_add(Weight::from_parts(0, 11052)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `159` + // Estimated: `13524` + // Minimum execution time: 19_211_000 picoseconds. + Weight::from_parts(19_552_000, 0) + .saturating_add(Weight::from_parts(0, 13524)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `166` - // Estimated: `11056` - // Minimum execution time: 17_443_000 picoseconds. - Weight::from_parts(18_032_000, 0) - .saturating_add(Weight::from_parts(0, 11056)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `163` + // Estimated: `13528` + // Minimum execution time: 19_177_000 picoseconds. 
+ Weight::from_parts(19_704_000, 0) + .saturating_add(Weight::from_parts(0, 13528)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `173` - // Estimated: `13538` - // Minimum execution time: 18_992_000 picoseconds. - Weight::from_parts(19_464_000, 0) - .saturating_add(Weight::from_parts(0, 13538)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `16013` + // Minimum execution time: 20_449_000 picoseconds. + Weight::from_parts(21_075_000, 0) + .saturating_add(Weight::from_parts(0, 16013)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -304,36 +304,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 28_011_000 picoseconds. - Weight::from_parts(28_716_000, 0) + // Minimum execution time: 26_578_000 picoseconds. + Weight::from_parts(27_545_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `206` - // Estimated: `8621` - // Minimum execution time: 9_533_000 picoseconds. - Weight::from_parts(9_856_000, 0) - .saturating_add(Weight::from_parts(0, 8621)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `11096` + // Minimum execution time: 11_646_000 picoseconds. + Weight::from_parts(11_944_000, 0) + .saturating_add(Weight::from_parts(0, 11096)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `173` - // Estimated: `11063` - // Minimum execution time: 17_628_000 picoseconds. - Weight::from_parts(18_146_000, 0) - .saturating_add(Weight::from_parts(0, 11063)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `170` + // Estimated: `13535` + // Minimum execution time: 19_301_000 picoseconds. 
+ Weight::from_parts(19_664_000, 0) + .saturating_add(Weight::from_parts(0, 13535)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -349,12 +349,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `215` - // Estimated: `11105` - // Minimum execution time: 34_877_000 picoseconds. - Weight::from_parts(35_607_000, 0) - .saturating_add(Weight::from_parts(0, 11105)) - .saturating_add(T::DbWeight::get().reads(10)) + // Measured: `212` + // Estimated: `13577` + // Minimum execution time: 35_715_000 picoseconds. + Weight::from_parts(36_915_000, 0) + .saturating_add(Weight::from_parts(0, 13577)) + .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -365,8 +365,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `1588` - // Minimum execution time: 5_370_000 picoseconds. - Weight::from_parts(5_616_000, 0) + // Minimum execution time: 4_871_000 picoseconds. + Weight::from_parts(5_066_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -377,10 +377,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 26_820_000 picoseconds. - Weight::from_parts(27_143_000, 0) + // Minimum execution time: 25_150_000 picoseconds. + Weight::from_parts(26_119_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `160` + // Estimated: `3625` + // Minimum execution time: 38_248_000 picoseconds. 
+ Weight::from_parts(39_122_000, 0) + .saturating_add(Weight::from_parts(0, 3625)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index 9356371cd9a5b2d032cd1ef7ff39ed2cb0ce0f1f..5a0a5c7b2cf1fc333514c7655b5deb3232faa768 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -112,6 +112,7 @@ runtime-benchmarks = [ "frame-system-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", "hex-literal", + "pallet-asset-conversion-tx-payment/runtime-benchmarks", "pallet-asset-conversion/runtime-benchmarks", "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", @@ -124,6 +125,7 @@ runtime-benchmarks = [ "pallet-proxy/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-uniques/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 375be1d83a957b572a21d4310ecb1798ec3f8cef..291a162a25032cde83e282b6d005a4551e1f1211 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -112,7 +112,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westmint"), impl_name: create_runtime_str!("westmint"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 14, @@ -164,6 +164,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = SS58Prefix; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = frame_support::traits::ConstU32<16>; @@ -220,6 +221,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -737,6 +739,9 @@ impl pallet_asset_conversion_tx_payment::Config for Runtime { type Fungibles = LocalAndForeignAssets; type OnChargeAssetTransaction = AssetConversionAdapter; + type WeightInfo = weights::pallet_asset_conversion_tx_payment::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = AssetConversionTxHelper; } parameter_types! { @@ -975,8 +980,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -988,7 +993,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. 
pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( @@ -1092,17 +1097,17 @@ impl frame_support::traits::OnRuntimeUpgrade for InitStorageVersions { let mut writes = 0; if PolkadotXcm::on_chain_storage_version() == StorageVersion::new(0) { - PolkadotXcm::current_storage_version().put::(); + PolkadotXcm::in_code_storage_version().put::(); writes.saturating_inc(); } if ForeignAssets::on_chain_storage_version() == StorageVersion::new(0) { - ForeignAssets::current_storage_version().put::(); + ForeignAssets::in_code_storage_version().put::(); writes.saturating_inc(); } if PoolAssets::on_chain_storage_version() == StorageVersion::new(0) { - PoolAssets::current_storage_version().put::(); + PoolAssets::in_code_storage_version().put::(); writes.saturating_inc(); } @@ -1120,14 +1125,77 @@ pub type Executive = frame_executive::Executive< Migrations, >; +#[cfg(feature = "runtime-benchmarks")] +pub struct AssetConversionTxHelper; + +#[cfg(feature = "runtime-benchmarks")] +impl + pallet_asset_conversion_tx_payment::BenchmarkHelperTrait< + AccountId, + xcm::v3::MultiLocation, + xcm::v3::MultiLocation, + > for AssetConversionTxHelper +{ + fn create_asset_id_parameter(seed: u32) -> (xcm::v3::MultiLocation, xcm::v3::MultiLocation) { + // Use a different parachain' foreign assets pallet so that the asset is indeed foreign. + let asset_id = xcm::v3::MultiLocation::new( + 1, + xcm::v3::Junctions::X3( + xcm::v3::Junction::Parachain(3000), + xcm::v3::Junction::PalletInstance(53), + xcm::v3::Junction::GeneralIndex(seed.into()), + ), + ); + (asset_id, asset_id) + } + + fn setup_balances_and_pool(asset_id: xcm::v3::MultiLocation, account: AccountId) { + use frame_support::{assert_ok, traits::fungibles::Mutate}; + assert_ok!(ForeignAssets::force_create( + RuntimeOrigin::root(), + asset_id.into(), + account.clone().into(), /* owner */ + true, /* is_sufficient */ + 1, + )); + + let lp_provider = account.clone(); + use frame_support::traits::Currency; + let _ = Balances::deposit_creating(&lp_provider, u64::MAX.into()); + assert_ok!(ForeignAssets::mint_into(asset_id.into(), &lp_provider, u64::MAX.into())); + + let token_native = Box::new(xcm::v3::MultiLocation::new(1, xcm::v3::Junctions::Here)); + let token_second = Box::new(asset_id); + + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(lp_provider.clone()), + token_native.clone(), + token_second.clone() + )); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(lp_provider.clone()), + token_native, + token_second, + (u32::MAX / 2).into(), // 1 desired + u32::MAX.into(), // 2 desired + 1, // 1 min + 1, // 2 min + lp_provider, + )); + } +} + #[cfg(feature = "runtime-benchmarks")] mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_assets, Local] [pallet_assets, Foreign] [pallet_assets, Pool] [pallet_asset_conversion, AssetConversion] + [pallet_asset_conversion_tx_payment, AssetTxPayment] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] @@ -1138,6 +1206,7 @@ mod benches { [pallet_uniques, Uniques] [pallet_utility, Utility] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] @@ -1181,7 +1250,7 @@ impl_runtime_apis! 
{ Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -1436,6 +1505,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use pallet_xcm_bridge_hub_router::benchmarking::Pallet as XcmBridgeHubRouterBench; @@ -1470,6 +1540,7 @@ impl_runtime_apis! { use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); @@ -1583,6 +1654,13 @@ impl_runtime_apis! { }); Some((assets, fee_index as u32, dest, verify)) } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } } use pallet_xcm_bridge_hub_router::benchmarking::{ diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..46f4f2bfd9668ce7316bf1a4a7a0c71833fd7f7d --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,121 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! 
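A note on the `initialize_block` change earlier in this hunk: the Core runtime API now returns an `sp_runtime::ExtrinsicInclusionMode`, letting block authors see whether a block accepts ordinary transactions or only inherents (e.g. while multi-block migrations are in flight). A hedged sketch of how a caller might branch on it; the match arms, not any runtime wiring, are the point:

```rust
use sp_runtime::ExtrinsicInclusionMode;

// Illustrative only: reacting to the mode reported by `initialize_block`.
fn describe(mode: ExtrinsicInclusionMode) -> &'static str {
    match mode {
        ExtrinsicInclusionMode::AllExtrinsics => "block may include transactions and inherents",
        ExtrinsicInclusionMode::OnlyInherents => "block must contain inherents only",
    }
}
```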
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/ +// --chain=asset-hub-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_206_000 picoseconds. + Weight::from_parts(6_212_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_851_000 picoseconds. + Weight::from_parts(8_847_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 631_000 picoseconds. + Weight::from_parts(3_086_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_446_000 picoseconds. + Weight::from_parts(5_911_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 481_000 picoseconds. + Weight::from_parts(2_916_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 501_000 picoseconds. + Weight::from_parts(2_595_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_927_000 picoseconds. 
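For orientation, the figures in this new file are consumed through `frame_system::Config::ExtensionsWeightInfo` (wired up in `lib.rs` above), so each member of the runtime's transaction-extension tuple can charge its own benchmarked validation cost. A rough sketch of the resulting per-transaction overhead, assuming it is simply the sum of the tuple members' weights (an assumption for illustration, not the exact runtime code):

```rust
use frame_support::weights::Weight;

// Illustrative only: summing per-extension weights with saturating arithmetic.
fn tuple_overhead(extension_weights: &[Weight]) -> Weight {
    extension_weights
        .iter()
        .fold(Weight::zero(), |acc, w| acc.saturating_add(*w))
}
```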
+ Weight::from_parts(6_613_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs index a2c0842047fc0e65a88d0e55bc32fecdfa1958bb..afd529173a6fc888a84dc9373ff829c9654d4229 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs @@ -18,7 +18,9 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_asset_conversion; +pub mod pallet_asset_conversion_tx_payment; pub mod pallet_assets_foreign; pub mod pallet_assets_local; pub mod pallet_assets_pool; @@ -33,6 +35,7 @@ pub mod pallet_proxy; pub mod pallet_scheduler; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_uniques; pub mod pallet_utility; pub mod pallet_xcm; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion_tx_payment.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion_tx_payment.rs new file mode 100644 index 0000000000000000000000000000000000000000..8fe302630fb9510d0e1c01ca9de37a1bd0be3a4f --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion_tx_payment.rs @@ -0,0 +1,92 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_asset_conversion_tx_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2024-01-04, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `Georges-MacBook-Pro.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/debug/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_asset_conversion_tx_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/ +// --chain=asset-hub-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_asset_conversion_tx_payment`. 
+pub struct WeightInfo(PhantomData); +impl pallet_asset_conversion_tx_payment::WeightInfo for WeightInfo { + fn charge_asset_tx_payment_zero() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_000_000 picoseconds. + Weight::from_parts(9_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_asset_tx_payment_native() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 214_000_000 picoseconds. + Weight::from_parts(219_000_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:2 w:2) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_asset_tx_payment_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `631` + // Estimated: `7404` + // Minimum execution time: 1_211_000_000 picoseconds. + Weight::from_parts(1_243_000_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_transaction_payment.rs new file mode 100644 index 0000000000000000000000000000000000000000..b4c78a78b489073648156133730bd39cd3c68497 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! 
HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/ +// --chain=asset-hub-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 40_847_000 picoseconds. + Weight::from_parts(49_674_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs index 504731f4a9ef743e62090582901a63f7aee78829..71facff87d35f350114850653acfbebeb5c29529 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-r43aesjn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,8 +64,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 25_482_000 picoseconds. - Weight::from_parts(26_622_000, 0) + // Minimum execution time: 21_630_000 picoseconds. + Weight::from_parts(22_306_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -90,8 +90,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 87_319_000 picoseconds. 
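For context on the `charge_transaction_payment` weight above: the extension itself is what gets benchmarked here, while the fee it withdraws follows pallet-transaction-payment's usual formula, `final_fee = base_fee + length_fee + targeted_fee_adjustment * weight_fee + tip`. A minimal sketch with illustrative names (not the pallet's internal identifiers), assuming the adjusted weight fee has already been computed:

```rust
// Illustrative only: composing a transaction fee from already-computed parts.
fn final_fee(base_fee: u128, length_fee: u128, adjusted_weight_fee: u128, tip: u128) -> u128 {
    base_fee
        .saturating_add(length_fee)
        .saturating_add(adjusted_weight_fee) // targeted_fee_adjustment * weight_fee
        .saturating_add(tip)
}
```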
- Weight::from_parts(89_764_000, 0) + // Minimum execution time: 91_802_000 picoseconds. + Weight::from_parts(93_672_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -118,8 +118,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `367` // Estimated: `6196` - // Minimum execution time: 139_133_000 picoseconds. - Weight::from_parts(141_507_000, 0) + // Minimum execution time: 118_930_000 picoseconds. + Weight::from_parts(122_306_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(5)) @@ -148,8 +148,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `496` // Estimated: `6208` - // Minimum execution time: 144_241_000 picoseconds. - Weight::from_parts(149_709_000, 0) + // Minimum execution time: 140_527_000 picoseconds. + Weight::from_parts(144_501_000, 0) .saturating_add(Weight::from_parts(0, 6208)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(7)) @@ -158,8 +158,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_392_000 picoseconds. - Weight::from_parts(10_779_000, 0) + // Minimum execution time: 7_556_000 picoseconds. + Weight::from_parts(7_798_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) @@ -168,8 +168,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_088_000 picoseconds. - Weight::from_parts(7_257_000, 0) + // Minimum execution time: 6_373_000 picoseconds. + Weight::from_parts(6_603_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -179,8 +179,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_095_000 picoseconds. - Weight::from_parts(2_136_000, 0) + // Minimum execution time: 1_941_000 picoseconds. + Weight::from_parts(2_088_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -206,8 +206,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 28_728_000 picoseconds. - Weight::from_parts(29_349_000, 0) + // Minimum execution time: 27_080_000 picoseconds. + Weight::from_parts(27_820_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -232,8 +232,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 30_605_000 picoseconds. - Weight::from_parts(31_477_000, 0) + // Minimum execution time: 28_850_000 picoseconds. + Weight::from_parts(29_506_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -244,45 +244,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_137_000 picoseconds. - Weight::from_parts(2_303_000, 0) + // Minimum execution time: 2_033_000 picoseconds. 
+ Weight::from_parts(2_201_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `162` - // Estimated: `11052` - // Minimum execution time: 16_719_000 picoseconds. - Weight::from_parts(17_329_000, 0) - .saturating_add(Weight::from_parts(0, 11052)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `159` + // Estimated: `13524` + // Minimum execution time: 18_844_000 picoseconds. + Weight::from_parts(19_197_000, 0) + .saturating_add(Weight::from_parts(0, 13524)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `166` - // Estimated: `11056` - // Minimum execution time: 16_687_000 picoseconds. - Weight::from_parts(17_405_000, 0) - .saturating_add(Weight::from_parts(0, 11056)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `163` + // Estimated: `13528` + // Minimum execution time: 18_940_000 picoseconds. + Weight::from_parts(19_450_000, 0) + .saturating_add(Weight::from_parts(0, 13528)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `173` - // Estimated: `13538` - // Minimum execution time: 18_751_000 picoseconds. - Weight::from_parts(19_130_000, 0) - .saturating_add(Weight::from_parts(0, 13538)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `16013` + // Minimum execution time: 20_521_000 picoseconds. + Weight::from_parts(21_076_000, 0) + .saturating_add(Weight::from_parts(0, 16013)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -302,36 +302,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 27_189_000 picoseconds. - Weight::from_parts(27_760_000, 0) + // Minimum execution time: 26_007_000 picoseconds. + Weight::from_parts(26_448_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `206` - // Estimated: `8621` - // Minimum execution time: 9_307_000 picoseconds. 
- Weight::from_parts(9_691_000, 0) - .saturating_add(Weight::from_parts(0, 8621)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `11096` + // Minimum execution time: 11_584_000 picoseconds. + Weight::from_parts(12_080_000, 0) + .saturating_add(Weight::from_parts(0, 11096)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `173` - // Estimated: `11063` - // Minimum execution time: 17_607_000 picoseconds. - Weight::from_parts(18_090_000, 0) - .saturating_add(Weight::from_parts(0, 11063)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `170` + // Estimated: `13535` + // Minimum execution time: 19_157_000 picoseconds. + Weight::from_parts(19_513_000, 0) + .saturating_add(Weight::from_parts(0, 13535)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -347,12 +347,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `215` - // Estimated: `11105` - // Minimum execution time: 34_322_000 picoseconds. - Weight::from_parts(35_754_000, 0) - .saturating_add(Weight::from_parts(0, 11105)) - .saturating_add(T::DbWeight::get().reads(10)) + // Measured: `212` + // Estimated: `13577` + // Minimum execution time: 34_878_000 picoseconds. + Weight::from_parts(35_623_000, 0) + .saturating_add(Weight::from_parts(0, 13577)) + .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -363,8 +363,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `1588` - // Minimum execution time: 4_513_000 picoseconds. - Weight::from_parts(4_754_000, 0) + // Minimum execution time: 3_900_000 picoseconds. + Weight::from_parts(4_161_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -375,10 +375,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 27_860_000 picoseconds. - Weight::from_parts(28_279_000, 0) + // Minimum execution time: 25_731_000 picoseconds. 
+ Weight::from_parts(26_160_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `160` + // Estimated: `3625` + // Minimum execution time: 37_251_000 picoseconds. + Weight::from_parts(38_075_000, 0) + .saturating_add(Weight::from_parts(0, 3625)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 7a1951fd24bd2d74ae722eef31b81dfdb28c8d9b..242627815d3a5ced13a99d9f7f7c075e55c63200 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -242,6 +242,7 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm-bridge-hub/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs index 6dbf96edc2ab0360385b8e04bf1dc52732abd9ca..323e6ed65e69c22ae6be6f80bb26b4e6455bbb94 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs @@ -40,7 +40,7 @@ use bridge_runtime_common::{ XcmBlobMessageDispatch, XcmVersionOfDestAndRemoteBridge, }, refund_relayer_extension::{ - ActualFeeRefund, RefundBridgedGrandpaMessages, RefundSignedExtensionAdapter, + ActualFeeRefund, RefundBridgedGrandpaMessages, RefundTransactionExtensionAdapter, RefundableMessagesLane, }, }; @@ -168,7 +168,7 @@ impl messages::BridgedChainWithMessages for RococoBulletin {} /// Signed extension that refunds relayers that are delivering messages from the Rococo Bulletin /// chain. 
-pub type OnBridgeHubRococoRefundRococoBulletinMessages = RefundSignedExtensionAdapter< +pub type OnBridgeHubRococoRefundRococoBulletinMessages = RefundTransactionExtensionAdapter< RefundBridgedGrandpaMessages< Runtime, BridgeGrandpaRococoBulletinInstance, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs index 5d55d7afbacfdb22f6939c88e87eaf64321945ff..05d1aa188d2d3abd114431e6ee388473dca8f982 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs @@ -39,7 +39,7 @@ use bridge_runtime_common::{ XcmBlobMessageDispatch, XcmVersionOfDestAndRemoteBridge, }, refund_relayer_extension::{ - ActualFeeRefund, RefundBridgedParachainMessages, RefundSignedExtensionAdapter, + ActualFeeRefund, RefundBridgedParachainMessages, RefundTransactionExtensionAdapter, RefundableMessagesLane, RefundableParachain, }, }; @@ -173,7 +173,7 @@ impl UnderlyingChainProvider for BridgeHubWestend { impl messages::BridgedChainWithMessages for BridgeHubWestend {} /// Signed extension that refunds relayers that are delivering messages from the Westend parachain. -pub type OnBridgeHubRococoRefundBridgeHubWestendMessages = RefundSignedExtensionAdapter< +pub type OnBridgeHubRococoRefundBridgeHubWestendMessages = RefundTransactionExtensionAdapter< RefundBridgedParachainMessages< Runtime, RefundableParachain< diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 15cf045a99561a0c9bf0828e59e10f76d1916599..16e3b2e8600f6df3721926b3ab597023d443bccc 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -102,8 +102,6 @@ use polkadot_runtime_common::prod_or_fast; #[cfg(feature = "runtime-benchmarks")] use benchmark_helpers::DoNothingRouter; -#[cfg(not(feature = "runtime-benchmarks"))] -use bridge_hub_common::BridgeHubMessageRouter; /// The address format for describing accounts. pub type Address = MultiAddress; @@ -117,8 +115,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The TransactionExtension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -136,7 +134,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. 
pub type Migrations = ( @@ -170,12 +168,12 @@ impl frame_support::traits::OnRuntimeUpgrade for InitStorageVersions { let mut writes = 0; if PolkadotXcm::on_chain_storage_version() == StorageVersion::new(0) { - PolkadotXcm::current_storage_version().put::(); + PolkadotXcm::in_code_storage_version().put::(); writes.saturating_inc(); } if Balances::on_chain_storage_version() == StorageVersion::new(0) { - Balances::current_storage_version().put::(); + Balances::in_code_storage_version().put::(); writes.saturating_inc(); } @@ -204,7 +202,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-rococo"), impl_name: create_runtime_str!("bridge-hub-rococo"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 4, @@ -264,6 +262,8 @@ impl frame_system::Config for Runtime { type DbWeight = RocksDbWeight; /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = weights::frame_system::WeightInfo; + /// Weight information for the extensions of this pallet. + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; /// Block & extrinsics weights: base values and limits. type BlockWeights = RuntimeBlockWeights; /// The maximum length of a block (in bytes). @@ -326,6 +326,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -367,11 +368,15 @@ parameter_types! { impl pallet_message_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; type WeightInfo = weights::pallet_message_queue::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] + // Use the NoopMessageProcessor exclusively for benchmarks, not for tests with the + // runtime-benchmarks feature as tests require the BridgeHubMessageRouter to process messages. + // The "test" feature flag doesn't work, hence the reliance on the "std" feature, which is + // enabled during tests. + #[cfg(all(not(feature = "std"), feature = "runtime-benchmarks"))] type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor; - #[cfg(not(feature = "runtime-benchmarks"))] - type MessageProcessor = BridgeHubMessageRouter< + #[cfg(not(all(not(feature = "std"), feature = "runtime-benchmarks")))] + type MessageProcessor = bridge_hub_common::BridgeHubMessageRouter< xcm_builder::ProcessXcmMessage< AggregateMessageOrigin, xcm_executor::XcmExecutor, @@ -757,12 +762,14 @@ bridge_runtime_common::generate_bridge_reject_obsolete_headers_and_messages! { mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] [pallet_session, SessionBench::] [pallet_utility, Utility] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] @@ -814,7 +821,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -1063,6 +1070,7 @@ impl_runtime_apis! 
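To make the `MessageProcessor` feature-gating above easier to follow: `all(not(feature = "std"), feature = "runtime-benchmarks")` is only true for a Wasm (no-std) benchmark build, so native test builds, which enable `std` even when `runtime-benchmarks` is on, still get the real `BridgeHubMessageRouter`. A self-contained sketch of the same cfg pattern; the function and strings are placeholders, only the cfg predicates mirror the hunk:

```rust
// Illustrative only: which branch the compiler selects under each build.
#[cfg(all(not(feature = "std"), feature = "runtime-benchmarks"))]
fn selected_processor() -> &'static str {
    "NoopMessageProcessor (Wasm benchmark build)"
}

#[cfg(not(all(not(feature = "std"), feature = "runtime-benchmarks")))]
fn selected_processor() -> &'static str {
    "BridgeHubMessageRouter (tests and production builds)"
}
```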
{ use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; @@ -1093,6 +1101,7 @@ impl_runtime_apis! { use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); @@ -1146,6 +1155,13 @@ impl_runtime_apis! { dest ) } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } } use xcm::latest::prelude::*; @@ -1469,16 +1485,16 @@ mod tests { use codec::Encode; use sp_runtime::{ generic::Era, - traits::{SignedExtension, Zero}, + traits::{TransactionExtensionBase, Zero}, }; #[test] fn ensure_signed_extension_definition_is_compatible_with_relay() { - use bp_polkadot_core::SuffixedCommonSignedExtensionExt; + use bp_polkadot_core::SuffixedCommonTransactionExtensionExt; sp_io::TestExternalities::default().execute_with(|| { frame_system::BlockHash::::insert(BlockNumber::zero(), Hash::default()); - let payload: SignedExtra = ( + let payload: TxExtension = ( frame_system::CheckNonZeroSender::new(), frame_system::CheckSpecVersion::new(), frame_system::CheckTxVersion::new(), @@ -1492,11 +1508,11 @@ mod tests { bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(), bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages::default(), ) - ); + ).into(); // for BridgeHubRococo { - let bhr_indirect_payload = bp_bridge_hub_rococo::SignedExtension::from_params( + let bhr_indirect_payload = bp_bridge_hub_rococo::TransactionExtension::from_params( VERSION.spec_version, VERSION.transaction_version, bp_runtime::TransactionEra::Immortal, @@ -1507,8 +1523,8 @@ mod tests { ); assert_eq!(payload.encode(), bhr_indirect_payload.encode()); assert_eq!( - payload.additional_signed().unwrap().encode(), - bhr_indirect_payload.additional_signed().unwrap().encode() + payload.implicit().unwrap().encode(), + bhr_indirect_payload.implicit().unwrap().encode() ) } }); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..99e3d6aeba02349af3f4413a2e35d5a81a2a5073 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system_extensions.rs @@ -0,0 +1,121 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ +// --chain=bridge-hub-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_136_000 picoseconds. + Weight::from_parts(5_842_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_771_000 picoseconds. + Weight::from_parts(8_857_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 732_000 picoseconds. + Weight::from_parts(2_875_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_627_000 picoseconds. + Weight::from_parts(6_322_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 471_000 picoseconds. + Weight::from_parts(2_455_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 491_000 picoseconds. 
+ Weight::from_parts(2_916_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_798_000 picoseconds. + Weight::from_parts(6_272_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs index aac39a4564fb600d9c4f623aa3ba27c78fc8f5fc..d97a30db9541bb01919f0a7a054f42d95816e875 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs @@ -25,6 +25,7 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_balances; pub mod pallet_bridge_grandpa; pub mod pallet_bridge_messages_rococo_to_rococo_bulletin; @@ -36,6 +37,7 @@ pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_transaction_payment.rs new file mode 100644 index 0000000000000000000000000000000000000000..71d17e7259f72c023be3d75f37300c1a45c41d6f --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ +// --chain=bridge-hub-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `3` + // Estimated: `3593` + // Minimum execution time: 34_956_000 picoseconds. + Weight::from_parts(40_788_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs index 5faded42aa82df52f403b68de2a470ad4a5a17b7..a732e1a573439c4b658191697024ac3c396c9de5 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-r43aesjn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,8 +64,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 23_683_000 picoseconds. - Weight::from_parts(24_199_000, 0) + // Minimum execution time: 18_513_000 picoseconds. + Weight::from_parts(19_156_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -90,8 +90,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3593` - // Minimum execution time: 89_524_000 picoseconds. - Weight::from_parts(91_401_000, 0) + // Minimum execution time: 88_096_000 picoseconds. 
+ Weight::from_parts(89_732_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -126,8 +126,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3593` - // Minimum execution time: 91_890_000 picoseconds. - Weight::from_parts(93_460_000, 0) + // Minimum execution time: 88_239_000 picoseconds. + Weight::from_parts(89_729_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -148,8 +148,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_152_000 picoseconds. - Weight::from_parts(7_355_000, 0) + // Minimum execution time: 5_955_000 picoseconds. + Weight::from_parts(6_266_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -159,8 +159,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_081_000 picoseconds. - Weight::from_parts(2_258_000, 0) + // Minimum execution time: 1_868_000 picoseconds. + Weight::from_parts(1_961_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -186,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 28_067_000 picoseconds. - Weight::from_parts(28_693_000, 0) + // Minimum execution time: 24_388_000 picoseconds. + Weight::from_parts(25_072_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -212,8 +212,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 30_420_000 picoseconds. - Weight::from_parts(31_373_000, 0) + // Minimum execution time: 26_762_000 picoseconds. + Weight::from_parts(27_631_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -224,45 +224,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_087_000 picoseconds. - Weight::from_parts(2_243_000, 0) + // Minimum execution time: 1_856_000 picoseconds. + Weight::from_parts(2_033_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `95` - // Estimated: `10985` - // Minimum execution time: 15_142_000 picoseconds. - Weight::from_parts(15_598_000, 0) - .saturating_add(Weight::from_parts(0, 10985)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `89` + // Estimated: `13454` + // Minimum execution time: 17_718_000 picoseconds. 
+ Weight::from_parts(18_208_000, 0) + .saturating_add(Weight::from_parts(0, 13454)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `10989` - // Minimum execution time: 15_041_000 picoseconds. - Weight::from_parts(15_493_000, 0) - .saturating_add(Weight::from_parts(0, 10989)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `93` + // Estimated: `13458` + // Minimum execution time: 17_597_000 picoseconds. + Weight::from_parts(18_090_000, 0) + .saturating_add(Weight::from_parts(0, 13458)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `13471` - // Minimum execution time: 16_624_000 picoseconds. - Weight::from_parts(17_031_000, 0) - .saturating_add(Weight::from_parts(0, 13471)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15946` + // Minimum execution time: 19_533_000 picoseconds. + Weight::from_parts(20_164_000, 0) + .saturating_add(Weight::from_parts(0, 15946)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -282,36 +282,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 26_398_000 picoseconds. - Weight::from_parts(26_847_000, 0) + // Minimum execution time: 24_958_000 picoseconds. + Weight::from_parts(25_628_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `136` - // Estimated: `8551` - // Minimum execution time: 8_741_000 picoseconds. - Weight::from_parts(8_954_000, 0) - .saturating_add(Weight::from_parts(0, 8551)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `11026` + // Minimum execution time: 12_209_000 picoseconds. + Weight::from_parts(12_612_000, 0) + .saturating_add(Weight::from_parts(0, 11026)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `10996` - // Minimum execution time: 15_306_000 picoseconds. 
- Weight::from_parts(15_760_000, 0) - .saturating_add(Weight::from_parts(0, 10996)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `100` + // Estimated: `13465` + // Minimum execution time: 17_844_000 picoseconds. + Weight::from_parts(18_266_000, 0) + .saturating_add(Weight::from_parts(0, 13465)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -327,12 +327,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `112` - // Estimated: `11002` - // Minimum execution time: 33_127_000 picoseconds. - Weight::from_parts(33_938_000, 0) - .saturating_add(Weight::from_parts(0, 11002)) - .saturating_add(T::DbWeight::get().reads(10)) + // Measured: `106` + // Estimated: `13471` + // Minimum execution time: 34_131_000 picoseconds. + Weight::from_parts(34_766_000, 0) + .saturating_add(Weight::from_parts(0, 13471)) + .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -343,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 4_290_000 picoseconds. - Weight::from_parts(4_450_000, 0) + // Minimum execution time: 3_525_000 picoseconds. + Weight::from_parts(3_724_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -355,10 +355,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 26_408_000 picoseconds. - Weight::from_parts(26_900_000, 0) + // Minimum execution time: 24_975_000 picoseconds. + Weight::from_parts(25_517_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `90` + // Estimated: `3555` + // Minimum execution time: 33_761_000 picoseconds. 
+ Weight::from_parts(34_674_000, 0) + .saturating_add(Weight::from_parts(0, 3555)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs index b9f43624b652f97ec1616664548baaadb5e4c05f..46c5df18a1586e77d0bc0f9eb5f9ed19fa689f59 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs @@ -22,7 +22,7 @@ use bridge_hub_rococo_runtime::{ bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages, xcm_config::XcmConfig, AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, MessageQueueServiceWeight, Runtime, RuntimeCall, RuntimeEvent, SessionKeys, - SignedExtra, UncheckedExtrinsic, + TxExtension, UncheckedExtrinsic, }; use codec::{Decode, Encode}; use cumulus_primitives_core::XcmError::{FailedToTransactAsset, NotHoldingFees}; @@ -51,6 +51,7 @@ fn collator_session_keys() -> bridge_hub_test_utils::CollatorSessionKeys( + 11155111, collator_session_keys(), 1013, 1000, @@ -69,6 +70,7 @@ pub fn transfer_token_to_ethereum_works() { #[test] pub fn unpaid_transfer_token_to_ethereum_fails_with_barrier() { snowbridge_runtime_test_common::send_unpaid_transfer_token_message::( + 11155111, collator_session_keys(), 1013, 1000, @@ -80,6 +82,7 @@ pub fn unpaid_transfer_token_to_ethereum_fails_with_barrier() { #[test] pub fn transfer_token_to_ethereum_fee_not_enough() { snowbridge_runtime_test_common::send_transfer_token_message_failure::( + 11155111, collator_session_keys(), 1013, 1000, @@ -95,6 +98,7 @@ pub fn transfer_token_to_ethereum_fee_not_enough() { #[test] pub fn transfer_token_to_ethereum_insufficient_fund() { snowbridge_runtime_test_common::send_transfer_token_message_failure::( + 11155111, collator_session_keys(), 1013, 1000, @@ -146,6 +150,7 @@ pub fn ethereum_outbound_queue_processes_messages_before_message_queue_works() { XcmConfig, AllPalletsWithoutSystem, >( + 11155111, collator_session_keys(), 1013, 1000, @@ -166,7 +171,7 @@ fn construct_extrinsic( call: RuntimeCall, ) -> UncheckedExtrinsic { let account_id = AccountId32::from(sender.public()); - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -183,13 +188,13 @@ fn construct_extrinsic( OnBridgeHubRococoRefundRococoBulletinMessages::default(), ), ); - let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap(); + let payload = SignedPayload::new(call.clone(), tx_ext.clone()).unwrap(); let signature = payload.using_encoded(|e| sender.sign(e)); UncheckedExtrinsic::new_signed( call, account_id.into(), Signature::Sr25519(signature.clone()), - extra, + tx_ext, ) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs index f11954cf165fdae2ee36377456b2bc4b2d6755a6..565343c55a5b66d4e2510cda01ba521ba6eee850 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs @@ -22,7 +22,7 @@ use bridge_hub_rococo_runtime::{ xcm_config::{RelayNetwork, TokenLocation, XcmConfig}, AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, 
EthereumGatewayAddress, Executive, ExistentialDeposit, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, - RuntimeEvent, RuntimeOrigin, SessionKeys, SignedExtra, TransactionPayment, UncheckedExtrinsic, + RuntimeEvent, RuntimeOrigin, SessionKeys, TransactionPayment, TxExtension, UncheckedExtrinsic, }; use bridge_hub_test_utils::SlotDurations; use codec::{Decode, Encode}; @@ -48,7 +48,7 @@ fn construct_extrinsic( call: RuntimeCall, ) -> UncheckedExtrinsic { let account_id = AccountId32::from(sender.public()); - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -64,14 +64,15 @@ fn construct_extrinsic( bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(), bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages::default(), ), - ); - let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap(); + ) + .into(); + let payload = SignedPayload::new(call.clone(), tx_ext.clone()).unwrap(); let signature = payload.using_encoded(|e| sender.sign(e)); UncheckedExtrinsic::new_signed( call, account_id.into(), Signature::Sr25519(signature.clone()), - extra, + tx_ext, ) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 8623f7cb366ecf3930b25563d6a912e53240d999..b5fe093b8be79c21e3298548e07b8327004d2a26 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -204,6 +204,7 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm-bridge-hub/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs index bce722aa5f87d006af0ec71429d6c84eeab4972d..934dce5c2c481092fed1a73162e3f7b1f0967a11 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs @@ -36,7 +36,7 @@ use bridge_runtime_common::{ XcmBlobMessageDispatch, XcmVersionOfDestAndRemoteBridge, }, refund_relayer_extension::{ - ActualFeeRefund, RefundBridgedParachainMessages, RefundSignedExtensionAdapter, + ActualFeeRefund, RefundBridgedParachainMessages, RefundTransactionExtensionAdapter, RefundableMessagesLane, RefundableParachain, }, }; @@ -190,7 +190,7 @@ impl ThisChainWithMessages for BridgeHubWestend { } /// Signed extension that refunds relayers that are delivering messages from the Rococo parachain. 
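The `construct_extrinsic` test helpers above now assemble the renamed `TxExtension` tuple (formerly `SignedExtra`) and convert it with `.into()` before signing. A hedged sketch of that pattern for the bridge-hub-rococo tests follows; the `<Runtime>` turbofish arguments, the sr25519 sender type, the zero nonce, the `SignedPayload`/`Signature` aliases, and the middle set of frame_system checks are assumptions filled in for illustration.

use bridge_hub_rococo_runtime::{
	bridge_to_bulletin_config, bridge_to_westend_config, BridgeRejectObsoleteHeadersAndMessages,
	Runtime, RuntimeCall, TxExtension, UncheckedExtrinsic,
};
use codec::Encode;
use sp_core::{sr25519, Pair};
use sp_runtime::{generic::Era, AccountId32, MultiSignature as Signature};

// Assumed alias for the sketch: the payload type that actually gets signed.
type SignedPayload = sp_runtime::generic::SignedPayload<RuntimeCall, TxExtension>;

fn construct_extrinsic_sketch(sender: sr25519::Pair, call: RuntimeCall) -> UncheckedExtrinsic {
	let account_id = AccountId32::from(sender.public());
	let tx_ext: TxExtension = (
		frame_system::CheckNonZeroSender::<Runtime>::new(),
		frame_system::CheckSpecVersion::<Runtime>::new(),
		frame_system::CheckTxVersion::<Runtime>::new(),
		frame_system::CheckGenesis::<Runtime>::new(),
		frame_system::CheckMortality::<Runtime>::from(Era::Immortal),
		frame_system::CheckNonce::<Runtime>::from(0),
		frame_system::CheckWeight::<Runtime>::new(),
		pallet_transaction_payment::ChargeTransactionPayment::<Runtime>::from(0),
		BridgeRejectObsoleteHeadersAndMessages::default(),
		(
			bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(),
			bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages::default(),
		),
	)
		.into();
	// Sign over the payload (call plus the extension's implicit data), then
	// attach the same extension to the submitted extrinsic.
	let payload = SignedPayload::new(call.clone(), tx_ext.clone()).unwrap();
	let signature = payload.using_encoded(|e| sender.sign(e));
	UncheckedExtrinsic::new_signed(call, account_id.into(), Signature::Sr25519(signature), tx_ext)
}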
-pub type OnBridgeHubWestendRefundBridgeHubRococoMessages = RefundSignedExtensionAdapter< +pub type OnBridgeHubWestendRefundBridgeHubRococoMessages = RefundTransactionExtensionAdapter< RefundBridgedParachainMessages< Runtime, RefundableParachain, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index bd42a33370dc81eed36dce2741e4fb6dc01bf1f3..86ed8b2f488c1c7d9ac389582754d05019a3e45c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -97,8 +97,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The TransactionExtension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -113,7 +113,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( @@ -142,12 +142,12 @@ impl frame_support::traits::OnRuntimeUpgrade for InitStorageVersions { let mut writes = 0; if PolkadotXcm::on_chain_storage_version() == StorageVersion::new(0) { - PolkadotXcm::current_storage_version().put::(); + PolkadotXcm::in_code_storage_version().put::(); writes.saturating_inc(); } if Balances::on_chain_storage_version() == StorageVersion::new(0) { - Balances::current_storage_version().put::(); + Balances::in_code_storage_version().put::(); writes.saturating_inc(); } @@ -176,7 +176,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-westend"), impl_name: create_runtime_str!("bridge-hub-westend"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 4, @@ -298,6 +298,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -515,12 +516,14 @@ bridge_runtime_common::generate_bridge_reject_obsolete_headers_and_messages! { mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] [pallet_session, SessionBench::] [pallet_utility, Utility] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] @@ -566,7 +569,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -761,6 +764,7 @@ impl_runtime_apis! 
{ use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; @@ -790,6 +794,7 @@ impl_runtime_apis! { use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); @@ -843,6 +848,13 @@ impl_runtime_apis! { dest ) } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } } use xcm::latest::prelude::*; @@ -1128,16 +1140,16 @@ mod tests { use codec::Encode; use sp_runtime::{ generic::Era, - traits::{SignedExtension, Zero}, + traits::{TransactionExtensionBase, Zero}, }; #[test] fn ensure_signed_extension_definition_is_compatible_with_relay() { - use bp_polkadot_core::SuffixedCommonSignedExtensionExt; + use bp_polkadot_core::SuffixedCommonTransactionExtensionExt; sp_io::TestExternalities::default().execute_with(|| { frame_system::BlockHash::::insert(BlockNumber::zero(), Hash::default()); - let payload: SignedExtra = ( + let payload: TxExtension = ( frame_system::CheckNonZeroSender::new(), frame_system::CheckSpecVersion::new(), frame_system::CheckTxVersion::new(), @@ -1150,10 +1162,10 @@ mod tests { ( bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(), ), - ); + ).into(); { - let bh_indirect_payload = bp_bridge_hub_westend::SignedExtension::from_params( + let bh_indirect_payload = bp_bridge_hub_westend::TransactionExtension::from_params( VERSION.spec_version, VERSION.transaction_version, bp_runtime::TransactionEra::Immortal, @@ -1164,8 +1176,8 @@ mod tests { ); assert_eq!(payload.encode(), bh_indirect_payload.encode()); assert_eq!( - payload.additional_signed().unwrap().encode(), - bh_indirect_payload.additional_signed().unwrap().encode() + payload.implicit().unwrap().encode(), + bh_indirect_payload.implicit().unwrap().encode() ) } }); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..06f1114b4dea90cc9d830ff1f492428f3672b202 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,121 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! 
Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/ +// --chain=bridge-hub-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_166_000 picoseconds. + Weight::from_parts(6_021_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_651_000 picoseconds. + Weight::from_parts(9_177_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 601_000 picoseconds. + Weight::from_parts(2_805_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_727_000 picoseconds. + Weight::from_parts(6_051_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 471_000 picoseconds. + Weight::from_parts(2_494_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 521_000 picoseconds. 
+ Weight::from_parts(2_655_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_808_000 picoseconds. + Weight::from_parts(6_402_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs index a65ee31d3e55ff8135fdd7dec35120e0a463409b..e23033e0dfd313c60fe53f64a49ad2237477ecb5 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs @@ -25,6 +25,7 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_balances; pub mod pallet_bridge_grandpa; pub mod pallet_bridge_messages; @@ -35,6 +36,7 @@ pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_transaction_payment.rs new file mode 100644 index 0000000000000000000000000000000000000000..92c53b918792257ddf932148fa10a4ba8016653f --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/ +// --chain=bridge-hub-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `3` + // Estimated: `3593` + // Minimum execution time: 40_286_000 picoseconds. + Weight::from_parts(45_816_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs index 83e4260e77198355d23ea0c38481d7b8e68267c7..a78ff2355efaf06562e44828a8df0730481d4098 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-r43aesjn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,8 +64,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 23_219_000 picoseconds. - Weight::from_parts(23_818_000, 0) + // Minimum execution time: 19_527_000 picoseconds. 
+ Weight::from_parts(19_839_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -88,10 +88,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `70` + // Measured: `107` // Estimated: `3593` - // Minimum execution time: 90_120_000 picoseconds. - Weight::from_parts(92_545_000, 0) + // Minimum execution time: 90_938_000 picoseconds. + Weight::from_parts(92_822_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -124,10 +124,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `70` + // Measured: `107` // Estimated: `3593` - // Minimum execution time: 91_339_000 picoseconds. - Weight::from_parts(93_204_000, 0) + // Minimum execution time: 90_133_000 picoseconds. + Weight::from_parts(92_308_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -148,8 +148,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_976_000 picoseconds. - Weight::from_parts(7_284_000, 0) + // Minimum execution time: 6_205_000 picoseconds. + Weight::from_parts(6_595_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -159,8 +159,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_044_000 picoseconds. - Weight::from_parts(2_223_000, 0) + // Minimum execution time: 1_927_000 picoseconds. + Weight::from_parts(2_062_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -186,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 27_778_000 picoseconds. - Weight::from_parts(28_318_000, 0) + // Minimum execution time: 25_078_000 picoseconds. + Weight::from_parts(25_782_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -212,8 +212,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 30_446_000 picoseconds. - Weight::from_parts(31_925_000, 0) + // Minimum execution time: 28_188_000 picoseconds. + Weight::from_parts(28_826_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -224,45 +224,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_037_000 picoseconds. - Weight::from_parts(2_211_000, 0) + // Minimum execution time: 1_886_000 picoseconds. 
+ Weight::from_parts(1_991_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `95` - // Estimated: `10985` - // Minimum execution time: 15_620_000 picoseconds. - Weight::from_parts(15_984_000, 0) - .saturating_add(Weight::from_parts(0, 10985)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `89` + // Estimated: `13454` + // Minimum execution time: 17_443_000 picoseconds. + Weight::from_parts(17_964_000, 0) + .saturating_add(Weight::from_parts(0, 13454)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `10989` - // Minimum execution time: 15_689_000 picoseconds. - Weight::from_parts(16_093_000, 0) - .saturating_add(Weight::from_parts(0, 10989)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `93` + // Estimated: `13458` + // Minimum execution time: 17_357_000 picoseconds. + Weight::from_parts(18_006_000, 0) + .saturating_add(Weight::from_parts(0, 13458)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `13471` - // Minimum execution time: 16_946_000 picoseconds. - Weight::from_parts(17_192_000, 0) - .saturating_add(Weight::from_parts(0, 13471)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15946` + // Minimum execution time: 18_838_000 picoseconds. + Weight::from_parts(19_688_000, 0) + .saturating_add(Weight::from_parts(0, 15946)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -282,36 +282,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 27_164_000 picoseconds. - Weight::from_parts(27_760_000, 0) + // Minimum execution time: 25_517_000 picoseconds. + Weight::from_parts(26_131_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `136` - // Estimated: `8551` - // Minimum execution time: 8_689_000 picoseconds. 
- Weight::from_parts(8_874_000, 0) - .saturating_add(Weight::from_parts(0, 8551)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `11026` + // Minimum execution time: 11_587_000 picoseconds. + Weight::from_parts(11_963_000, 0) + .saturating_add(Weight::from_parts(0, 11026)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `10996` - // Minimum execution time: 15_944_000 picoseconds. - Weight::from_parts(16_381_000, 0) - .saturating_add(Weight::from_parts(0, 10996)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `100` + // Estimated: `13465` + // Minimum execution time: 17_490_000 picoseconds. + Weight::from_parts(18_160_000, 0) + .saturating_add(Weight::from_parts(0, 13465)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -327,12 +327,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `112` - // Estimated: `11002` - // Minimum execution time: 33_826_000 picoseconds. - Weight::from_parts(34_784_000, 0) - .saturating_add(Weight::from_parts(0, 11002)) - .saturating_add(T::DbWeight::get().reads(10)) + // Measured: `106` + // Estimated: `13471` + // Minimum execution time: 34_088_000 picoseconds. + Weight::from_parts(34_598_000, 0) + .saturating_add(Weight::from_parts(0, 13471)) + .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -343,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 4_257_000 picoseconds. - Weight::from_parts(4_383_000, 0) + // Minimum execution time: 3_566_000 picoseconds. + Weight::from_parts(3_754_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -355,10 +355,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 26_924_000 picoseconds. - Weight::from_parts(27_455_000, 0) + // Minimum execution time: 25_078_000 picoseconds. 
+ Weight::from_parts(25_477_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `90` + // Estimated: `3555` + // Minimum execution time: 34_661_000 picoseconds. + Weight::from_parts(35_411_000, 0) + .saturating_add(Weight::from_parts(0, 3555)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs index 149a3bbeb75d82ba7c35641428c7677d18297f8c..7ea22befe95f88233dca4bf95d77e3a13ac8afa5 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs @@ -24,7 +24,7 @@ use bridge_hub_westend_runtime::{ xcm_config::{RelayNetwork, WestendLocation, XcmConfig}, AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, ExistentialDeposit, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, SessionKeys, - SignedExtra, TransactionPayment, UncheckedExtrinsic, + TransactionPayment, TxExtension, UncheckedExtrinsic, }; use bridge_to_rococo_config::{ BridgeGrandpaRococoInstance, BridgeHubRococoChainId, BridgeHubRococoLocation, @@ -65,7 +65,7 @@ fn construct_extrinsic( call: RuntimeCall, ) -> UncheckedExtrinsic { let account_id = AccountId32::from(sender.public()); - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -78,14 +78,15 @@ fn construct_extrinsic( pallet_transaction_payment::ChargeTransactionPayment::::from(0), BridgeRejectObsoleteHeadersAndMessages::default(), (bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(),), - ); - let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap(); + ) + .into(); + let payload = SignedPayload::new(call.clone(), tx_ext.clone()).unwrap(); let signature = payload.using_encoded(|e| sender.sign(e)); UncheckedExtrinsic::new_signed( call, account_id.into(), Signature::Sr25519(signature.clone()), - extra, + tx_ext, ) } diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml index ed264f28c26e4d48bca416f1e6bea0f0049aa334..32d7e97bc45503793672889e67177c249a3e6e7b 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml @@ -117,6 +117,7 @@ runtime-benchmarks = [ "pallet-salary/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs index caf0cddec664a55ce080ed6052a9cdf42a5e2f28..e5b176fc77873805fb0e4ed6dba74d720ea3479a 100644 --- 
a/cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs @@ -81,7 +81,7 @@ where } fn proposal_of(proposal_hash: HashOf) -> Option> { - pallet_collective::Pallet::::proposal_of(proposal_hash) + pallet_collective::ProposalOf::::get(proposal_hash) } } diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 36eb0e07f87c16704ab27745c2f19efc113884e3..3887a1d74ecce047f02154721414bf554e3274e7 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -117,7 +117,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("collectives-westend"), impl_name: create_runtime_str!("collectives-westend"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, @@ -175,6 +175,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = ConstU16<0>; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = frame_support::traits::ConstU32<16>; @@ -231,6 +232,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -707,8 +709,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -719,7 +721,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// All migrations executed on runtime upgrade as a nested tuple of types implementing /// `OnRuntimeUpgrade`. Included migrations must be idempotent. type Migrations = ( @@ -745,6 +747,7 @@ pub type Executive = frame_executive::Executive< mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] @@ -752,6 +755,7 @@ mod benches { [pallet_session, SessionBench::] [pallet_utility, Utility] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] @@ -803,7 +807,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -955,6 +959,7 @@ impl_runtime_apis! 
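In the collectives `impls.rs` hunk above, the removed `proposal_of` getter call is replaced by reading the collective's `ProposalOf` storage map directly. Below is a small sketch of that access pattern with the generic arguments written out; the `T`/`I` parameters, their bounds and the function name are assumptions for illustration.

// Hypothetical helper mirroring the storage access shown above; `T` is the
// runtime and `I` the collective instance.
fn proposal_of_sketch<T, I>(
	proposal_hash: T::Hash,
) -> Option<<T as pallet_collective::Config<I>>::Proposal>
where
	T: pallet_collective::Config<I>,
	I: 'static,
{
	// Direct storage map read; `None` when nothing is stored under this hash.
	pallet_collective::ProposalOf::<T, I>::get(proposal_hash)
}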
{ use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; @@ -972,6 +977,7 @@ impl_runtime_apis! { use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); @@ -1032,6 +1038,13 @@ impl_runtime_apis! { dest ) } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } } let whitelist: Vec = vec![ diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..f3030cc4f6aa5acbeec612ae17eb0cf2b4663383 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,121 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/ +// --chain=collectives-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. 
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<T> {
+	/// Storage: `System::BlockHash` (r:1 w:0)
+	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
+	fn check_genesis() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `54`
+		// Estimated: `3509`
+		// Minimum execution time: 3_497_000 picoseconds.
+		Weight::from_parts(5_961_000, 0)
+			.saturating_add(Weight::from_parts(0, 3509))
+			.saturating_add(T::DbWeight::get().reads(1))
+	}
+	/// Storage: `System::BlockHash` (r:1 w:0)
+	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
+	fn check_mortality() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `92`
+		// Estimated: `3509`
+		// Minimum execution time: 5_240_000 picoseconds.
+		Weight::from_parts(8_175_000, 0)
+			.saturating_add(Weight::from_parts(0, 3509))
+			.saturating_add(T::DbWeight::get().reads(1))
+	}
+	fn check_non_zero_sender() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 671_000 picoseconds.
+		Weight::from_parts(3_005_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+	}
+	fn check_nonce() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 3_426_000 picoseconds.
+		Weight::from_parts(6_131_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+	}
+	fn check_spec_version() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 501_000 picoseconds.
+		Weight::from_parts(2_715_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+	}
+	fn check_tx_version() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 491_000 picoseconds.
+		Weight::from_parts(2_635_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+	}
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	fn check_weight() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `24`
+		// Estimated: `1533`
+		// Minimum execution time: 3_958_000 picoseconds.
+ Weight::from_parts(6_753_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs index a9a298e547edb49738b9a0612d04d4141301d4ba..00b3bd92d5ef9ce0081ca9e95e43bd35098c6aa0 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs @@ -18,6 +18,7 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_alliance; pub mod pallet_asset_rate; pub mod pallet_balances; @@ -39,6 +40,7 @@ pub mod pallet_salary_fellowship_salary; pub mod pallet_scheduler; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_treasury; pub mod pallet_utility; pub mod pallet_xcm; diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_transaction_payment.rs new file mode 100644 index 0000000000000000000000000000000000000000..5d077b89d56421a05cd64bec99ecdad445049528 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/ +// --chain=collectives-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. 
+pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 39_815_000 picoseconds. + Weight::from_parts(46_067_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs index 50dfbffde01f21d1138f0fdaa27649f962e245e4..5d427d850046ff030c6c5b6247426849227e7ea1 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-r43aesjn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,8 +64,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 24_540_000 picoseconds. - Weight::from_parts(25_439_000, 0) + // Minimum execution time: 21_813_000 picoseconds. + Weight::from_parts(22_332_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -90,8 +90,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `214` // Estimated: `3679` - // Minimum execution time: 86_614_000 picoseconds. - Weight::from_parts(88_884_000, 0) + // Minimum execution time: 93_243_000 picoseconds. + Weight::from_parts(95_650_000, 0) .saturating_add(Weight::from_parts(0, 3679)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -126,8 +126,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `214` // Estimated: `3679` - // Minimum execution time: 87_915_000 picoseconds. - Weight::from_parts(90_219_000, 0) + // Minimum execution time: 96_199_000 picoseconds. + Weight::from_parts(98_620_000, 0) .saturating_add(Weight::from_parts(0, 3679)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -148,8 +148,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_872_000 picoseconds. 
- Weight::from_parts(7_110_000, 0) + // Minimum execution time: 6_442_000 picoseconds. + Weight::from_parts(6_682_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -159,8 +159,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_009_000 picoseconds. - Weight::from_parts(2_163_000, 0) + // Minimum execution time: 1_833_000 picoseconds. + Weight::from_parts(1_973_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -186,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 28_858_000 picoseconds. - Weight::from_parts(29_355_000, 0) + // Minimum execution time: 27_318_000 picoseconds. + Weight::from_parts(28_224_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -212,8 +212,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 30_598_000 picoseconds. - Weight::from_parts(31_168_000, 0) + // Minimum execution time: 29_070_000 picoseconds. + Weight::from_parts(30_205_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -224,45 +224,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_090_000 picoseconds. - Weight::from_parts(2_253_000, 0) + // Minimum execution time: 1_904_000 picoseconds. + Weight::from_parts(2_033_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `162` - // Estimated: `11052` - // Minimum execution time: 16_133_000 picoseconds. - Weight::from_parts(16_433_000, 0) - .saturating_add(Weight::from_parts(0, 11052)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `159` + // Estimated: `13524` + // Minimum execution time: 18_348_000 picoseconds. + Weight::from_parts(18_853_000, 0) + .saturating_add(Weight::from_parts(0, 13524)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `166` - // Estimated: `11056` - // Minimum execution time: 16_012_000 picoseconds. - Weight::from_parts(16_449_000, 0) - .saturating_add(Weight::from_parts(0, 11056)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `163` + // Estimated: `13528` + // Minimum execution time: 17_964_000 picoseconds. 
+ Weight::from_parts(18_548_000, 0) + .saturating_add(Weight::from_parts(0, 13528)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `173` - // Estimated: `13538` - // Minimum execution time: 17_922_000 picoseconds. - Weight::from_parts(18_426_000, 0) - .saturating_add(Weight::from_parts(0, 13538)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `16013` + // Minimum execution time: 19_708_000 picoseconds. + Weight::from_parts(20_157_000, 0) + .saturating_add(Weight::from_parts(0, 16013)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -282,36 +282,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 27_280_000 picoseconds. - Weight::from_parts(28_026_000, 0) + // Minimum execution time: 26_632_000 picoseconds. + Weight::from_parts(27_314_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `206` - // Estimated: `8621` - // Minimum execution time: 9_387_000 picoseconds. - Weight::from_parts(9_644_000, 0) - .saturating_add(Weight::from_parts(0, 8621)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `11096` + // Minimum execution time: 11_929_000 picoseconds. + Weight::from_parts(12_304_000, 0) + .saturating_add(Weight::from_parts(0, 11096)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `173` - // Estimated: `11063` - // Minimum execution time: 16_649_000 picoseconds. - Weight::from_parts(17_025_000, 0) - .saturating_add(Weight::from_parts(0, 11063)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `170` + // Estimated: `13535` + // Minimum execution time: 18_599_000 picoseconds. 
+ Weight::from_parts(19_195_000, 0) + .saturating_add(Weight::from_parts(0, 13535)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -327,12 +327,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `215` - // Estimated: `11105` - // Minimum execution time: 34_355_000 picoseconds. - Weight::from_parts(35_295_000, 0) - .saturating_add(Weight::from_parts(0, 11105)) - .saturating_add(T::DbWeight::get().reads(10)) + // Measured: `212` + // Estimated: `13577` + // Minimum execution time: 35_524_000 picoseconds. + Weight::from_parts(36_272_000, 0) + .saturating_add(Weight::from_parts(0, 13577)) + .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -343,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `1588` - // Minimum execution time: 4_527_000 picoseconds. - Weight::from_parts(4_699_000, 0) + // Minimum execution time: 4_044_000 picoseconds. + Weight::from_parts(4_238_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -355,10 +355,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 27_011_000 picoseconds. - Weight::from_parts(27_398_000, 0) + // Minimum execution time: 25_741_000 picoseconds. + Weight::from_parts(26_301_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `160` + // Estimated: `3625` + // Minimum execution time: 35_925_000 picoseconds. 
+ Weight::from_parts(36_978_000, 0) + .saturating_add(Weight::from_parts(0, 3625)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml index dcc6c4e853a39d6acb15e205256141de1db4ed2e..747f75cf2e26326afcce5f2ab471777358a05a8a 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml @@ -158,6 +158,7 @@ runtime-benchmarks = [ "pallet-multisig/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "parachains-common/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs index 681b95ce6a535ca1dd7696dac0b647aec4c8709b..171ac6a9528f134d9c22548500805ef36e9504f9 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs @@ -21,6 +21,7 @@ use frame_support::{ parameter_types, traits::{ConstBool, ConstU32, Nothing}, }; +use frame_system::EnsureSigned; use pallet_contracts::{ weights::SubstrateWeight, Config, DebugInfo, DefaultAddressGenerator, Frame, Schedule, }; @@ -65,6 +66,8 @@ impl Config for Runtime { type MaxCodeLen = ConstU32<{ 123 * 1024 }>; type MaxStorageKeyLen = ConstU32<128>; type UnsafeUnstableInterface = ConstBool; + type UploadOrigin = EnsureSigned; + type InstantiateOrigin = EnsureSigned; type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; type MaxDelegateDependencies = ConstU32<32>; type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 541978098caaefaccc7724f56d8ff1b819fe42ec..8ce46ccd34f108cbc1961a0f1c6e0e0482f0db23 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -80,8 +80,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -93,7 +93,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. 
pub type Migrations = ( @@ -133,7 +133,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("contracts-rococo"), impl_name: create_runtime_str!("contracts-rococo"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, @@ -240,6 +240,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = pallet_transaction_payment::weights::SubstrateWeight; } parameter_types! { @@ -423,6 +424,7 @@ construct_runtime!( mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] @@ -466,7 +468,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -686,6 +688,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; @@ -703,6 +706,7 @@ impl_runtime_apis! { use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); @@ -764,6 +768,13 @@ impl_runtime_apis! { dest ) } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(EXISTENTIAL_DEPOSIT), + } + } } let whitelist: Vec = vec![ diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml index 0bc3b510ed50e9ee590fd82961a7d08fb3f581b0..70a6a81bdcfb97c96894b07a880bdadf0f39161e 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml @@ -156,6 +156,7 @@ runtime-benchmarks = [ "pallet-multisig/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index fcd7576b1e17aafcf9f97d13cfe7c1a1e37f14b0..9913e47a93ab940df330c9ab0a6bef9c90da6f2d 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -89,8 +89,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The TransactionExtension to the basic transaction logic. 
+pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -103,7 +103,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( @@ -133,7 +133,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("coretime-rococo"), impl_name: create_runtime_str!("coretime-rococo"), authoring_version: 1, - spec_version: 1_007_001, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, @@ -192,6 +192,8 @@ impl frame_system::Config for Runtime { type DbWeight = RocksDbWeight; /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = weights::frame_system::WeightInfo; + /// Weight information for the extensions of this pallet. + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; /// Block & extrinsics weights: base values and limits. type BlockWeights = RuntimeBlockWeights; /// The maximum length of a block (in bytes). @@ -251,6 +253,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -521,7 +524,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -740,6 +743,13 @@ impl_runtime_apis! { // Reserve transfers are disabled None } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } } parameter_types! { diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/frame_system_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..aac73680ad1296eaec9073277b1779b3b9f4d898 --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/frame_system_extensions.rs @@ -0,0 +1,121 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ +// --chain=coretime-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_637_000 picoseconds. + Weight::from_parts(6_382_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. + Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 561_000 picoseconds. + Weight::from_parts(2_705_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_316_000 picoseconds. + Weight::from_parts(5_771_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 511_000 picoseconds. + Weight::from_parts(2_575_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 501_000 picoseconds. + Weight::from_parts(2_595_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_687_000 picoseconds. 
+ Weight::from_parts(6_192_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs index f1050b3ae636261ff21674c3bb34c05bf6d232c5..7b6ab611e6f3bf48c69b2945120c5f5118854808 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs @@ -22,6 +22,7 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_balances; pub mod pallet_broker; pub mod pallet_collator_selection; @@ -29,6 +30,7 @@ pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_transaction_payment.rs new file mode 100644 index 0000000000000000000000000000000000000000..29d48abab8956c79724b9544b99f06ef9303abaa --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ +// --chain=coretime-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. 
+pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 33_363_000 picoseconds. + Weight::from_parts(38_793_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs index 0e34cba4aaf5d9879dfecc97ffd736b985c98f23..c5d315467c1ed8b2aabf7ac18abe10931a02951b 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs @@ -16,26 +16,24 @@ //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-j8vvqcjr-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=coretime-rococo-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=coretime-rococo-dev // --header=./cumulus/file_header.txt // --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ @@ -64,8 +62,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 22_669_000 picoseconds. - Weight::from_parts(23_227_000, 0) + // Minimum execution time: 35_051_000 picoseconds. + Weight::from_parts(35_200_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -86,8 +84,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 64_486_000 picoseconds. - Weight::from_parts(65_247_000, 0) + // Minimum execution time: 56_235_000 picoseconds. 
+ Weight::from_parts(58_178_000, 0) .saturating_add(Weight::from_parts(0, 3571)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -128,8 +126,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_020_000 picoseconds. - Weight::from_parts(7_300_000, 0) + // Minimum execution time: 6_226_000 picoseconds. + Weight::from_parts(6_403_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -139,8 +137,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_022_000 picoseconds. - Weight::from_parts(2_141_000, 0) + // Minimum execution time: 2_020_000 picoseconds. + Weight::from_parts(2_100_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -164,8 +162,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 26_893_000 picoseconds. - Weight::from_parts(27_497_000, 0) + // Minimum execution time: 24_387_000 picoseconds. + Weight::from_parts(24_814_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -188,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `292` // Estimated: `3757` - // Minimum execution time: 29_673_000 picoseconds. - Weight::from_parts(30_693_000, 0) + // Minimum execution time: 27_039_000 picoseconds. + Weight::from_parts(27_693_000, 0) .saturating_add(Weight::from_parts(0, 3757)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -200,45 +198,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_990_000 picoseconds. - Weight::from_parts(2_105_000, 0) + // Minimum execution time: 1_920_000 picoseconds. + Weight::from_parts(2_082_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `95` - // Estimated: `10985` - // Minimum execution time: 14_819_000 picoseconds. - Weight::from_parts(15_180_000, 0) - .saturating_add(Weight::from_parts(0, 10985)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `89` + // Estimated: `13454` + // Minimum execution time: 17_141_000 picoseconds. + Weight::from_parts(17_500_000, 0) + .saturating_add(Weight::from_parts(0, 13454)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `10989` - // Minimum execution time: 14_935_000 picoseconds. 
- Weight::from_parts(15_335_000, 0) - .saturating_add(Weight::from_parts(0, 10989)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `93` + // Estimated: `13458` + // Minimum execution time: 17_074_000 picoseconds. + Weight::from_parts(17_431_000, 0) + .saturating_add(Weight::from_parts(0, 13458)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `13471` - // Minimum execution time: 16_278_000 picoseconds. - Weight::from_parts(16_553_000, 0) - .saturating_add(Weight::from_parts(0, 13471)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15946` + // Minimum execution time: 19_139_000 picoseconds. + Weight::from_parts(19_474_000, 0) + .saturating_add(Weight::from_parts(0, 15946)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -256,36 +254,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `6082` - // Minimum execution time: 26_360_000 picoseconds. - Weight::from_parts(26_868_000, 0) + // Minimum execution time: 24_346_000 picoseconds. + Weight::from_parts(25_318_000, 0) .saturating_add(Weight::from_parts(0, 6082)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `136` - // Estimated: `8551` - // Minimum execution time: 8_615_000 picoseconds. - Weight::from_parts(8_903_000, 0) - .saturating_add(Weight::from_parts(0, 8551)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `11026` + // Minimum execution time: 11_777_000 picoseconds. + Weight::from_parts(12_051_000, 0) + .saturating_add(Weight::from_parts(0, 11026)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `10996` - // Minimum execution time: 15_284_000 picoseconds. - Weight::from_parts(15_504_000, 0) - .saturating_add(Weight::from_parts(0, 10996)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `100` + // Estimated: `13465` + // Minimum execution time: 17_538_000 picoseconds. 
+ Weight::from_parts(17_832_000, 0) + .saturating_add(Weight::from_parts(0, 13465)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -299,12 +297,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `148` - // Estimated: `11038` - // Minimum execution time: 32_675_000 picoseconds. - Weight::from_parts(33_816_000, 0) - .saturating_add(Weight::from_parts(0, 11038)) - .saturating_add(T::DbWeight::get().reads(9)) + // Measured: `142` + // Estimated: `13507` + // Minimum execution time: 33_623_000 picoseconds. + Weight::from_parts(34_186_000, 0) + .saturating_add(Weight::from_parts(0, 13507)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -315,8 +313,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 4_058_000 picoseconds. - Weight::from_parts(4_170_000, 0) + // Minimum execution time: 3_363_000 picoseconds. + Weight::from_parts(3_511_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -327,10 +325,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 25_375_000 picoseconds. - Weight::from_parts(26_026_000, 0) + // Minimum execution time: 23_969_000 picoseconds. + Weight::from_parts(24_347_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `90` + // Estimated: `3555` + // Minimum execution time: 34_071_000 picoseconds. 
+ Weight::from_parts(35_031_000, 0) + .saturating_add(Weight::from_parts(0, 3555)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml index a7d52dfd7849ec1a20ef400bd45f995374697409..549ef7ce4d78f54ea8fa88711a00e25966179dd0 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml @@ -153,6 +153,7 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 80eb7863803b62ed09d67ab703df7507360e5333..1a330821d3fcac112426bb9f8591190ec0d6428b 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -89,8 +89,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The TransactionExtension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -103,7 +103,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( @@ -133,7 +133,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("coretime-westend"), impl_name: create_runtime_str!("coretime-westend"), authoring_version: 1, - spec_version: 1_007_001, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, @@ -192,6 +192,8 @@ impl frame_system::Config for Runtime { type DbWeight = RocksDbWeight; /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = weights::frame_system::WeightInfo; + /// Weight information for the extensions of this pallet. + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; /// Block & extrinsics weights: base values and limits. type BlockWeights = RuntimeBlockWeights; /// The maximum length of a block (in bytes). @@ -251,6 +253,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -512,7 +515,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -731,6 +734,13 @@ impl_runtime_apis! { // Reserve transfers are disabled None } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } } parameter_types! 
{ diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/frame_system_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..0683158a01a646315d13016d019c9f590f10ef04 --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,121 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/ +// --chain=coretime-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_637_000 picoseconds. + Weight::from_parts(6_382_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. + Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 561_000 picoseconds. 
+ Weight::from_parts(2_705_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_316_000 picoseconds. + Weight::from_parts(5_771_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 511_000 picoseconds. + Weight::from_parts(2_575_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 501_000 picoseconds. + Weight::from_parts(2_595_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_687_000 picoseconds. + Weight::from_parts(6_192_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs index f1050b3ae636261ff21674c3bb34c05bf6d232c5..7b6ab611e6f3bf48c69b2945120c5f5118854808 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs @@ -22,6 +22,7 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_balances; pub mod pallet_broker; pub mod pallet_collator_selection; @@ -29,6 +30,7 @@ pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_transaction_payment.rs new file mode 100644 index 0000000000000000000000000000000000000000..f159f877afe77d7b7b98524d726e3867c6f80588 --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! 
Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/ +// --chain=coretime-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 33_363_000 picoseconds. + Weight::from_parts(38_793_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs index d821a581b0dd7bae506f98a52bd28086b9d3c450..0082db3099d029c976779af8600bcaf4410e8a2f 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs @@ -17,25 +17,23 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=coretime-westend-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=coretime-westend-dev // --header=./cumulus/file_header.txt // --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/ @@ -64,8 +62,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 17_946_000 picoseconds. - Weight::from_parts(18_398_000, 0) + // Minimum execution time: 18_410_000 picoseconds. + Weight::from_parts(18_657_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -86,8 +84,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 47_982_000 picoseconds. - Weight::from_parts(49_215_000, 0) + // Minimum execution time: 56_616_000 picoseconds. + Weight::from_parts(57_751_000, 0) .saturating_add(Weight::from_parts(0, 3571)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -128,8 +126,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_042_000 picoseconds. - Weight::from_parts(6_257_000, 0) + // Minimum execution time: 6_014_000 picoseconds. + Weight::from_parts(6_412_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -139,8 +137,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_845_000 picoseconds. - Weight::from_parts(1_993_000, 0) + // Minimum execution time: 1_844_000 picoseconds. + Weight::from_parts(1_957_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -164,8 +162,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 24_062_000 picoseconds. - Weight::from_parts(24_666_000, 0) + // Minimum execution time: 24_067_000 picoseconds. + Weight::from_parts(24_553_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -188,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `292` // Estimated: `3757` - // Minimum execution time: 26_486_000 picoseconds. - Weight::from_parts(27_528_000, 0) + // Minimum execution time: 27_023_000 picoseconds. + Weight::from_parts(27_620_000, 0) .saturating_add(Weight::from_parts(0, 3757)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -200,8 +198,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_881_000 picoseconds. 
- Weight::from_parts(2_008_000, 0) + // Minimum execution time: 1_866_000 picoseconds. + Weight::from_parts(1_984_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -211,8 +209,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 15_971_000 picoseconds. - Weight::from_parts(16_455_000, 0) + // Minimum execution time: 16_425_000 picoseconds. + Weight::from_parts(16_680_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -223,8 +221,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 16_603_000 picoseconds. - Weight::from_parts(17_037_000, 0) + // Minimum execution time: 16_171_000 picoseconds. + Weight::from_parts(16_564_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -235,8 +233,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 17_821_000 picoseconds. - Weight::from_parts(18_200_000, 0) + // Minimum execution time: 17_785_000 picoseconds. + Weight::from_parts(18_123_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -256,8 +254,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `6082` - // Minimum execution time: 23_878_000 picoseconds. - Weight::from_parts(24_721_000, 0) + // Minimum execution time: 23_903_000 picoseconds. + Weight::from_parts(24_769_000, 0) .saturating_add(Weight::from_parts(0, 6082)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) @@ -268,8 +266,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 10_566_000 picoseconds. - Weight::from_parts(11_053_000, 0) + // Minimum execution time: 10_617_000 picoseconds. + Weight::from_parts(10_843_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -279,8 +277,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 16_020_000 picoseconds. - Weight::from_parts(16_619_000, 0) + // Minimum execution time: 16_656_000 picoseconds. + Weight::from_parts(17_106_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -301,8 +299,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `13507` - // Minimum execution time: 32_136_000 picoseconds. - Weight::from_parts(32_610_000, 0) + // Minimum execution time: 31_721_000 picoseconds. + Weight::from_parts(32_547_000, 0) .saturating_add(Weight::from_parts(0, 13507)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) @@ -315,8 +313,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_336_000 picoseconds. 
- Weight::from_parts(3_434_000, 0) + // Minimum execution time: 3_439_000 picoseconds. + Weight::from_parts(3_619_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -327,10 +325,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 23_977_000 picoseconds. - Weight::from_parts(24_413_000, 0) + // Minimum execution time: 24_657_000 picoseconds. + Weight::from_parts(24_971_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `90` + // Estimated: `3555` + // Minimum execution time: 34_028_000 picoseconds. + Weight::from_parts(34_697_000, 0) + .saturating_add(Weight::from_parts(0, 3555)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index 10408aaf39a7611f93178802e17403ed4fd90838..199057c3764797bf1da194c8ede174e80fd5d91c 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -100,7 +100,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("glutton-westend"), impl_name: create_runtime_str!("glutton-westend"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -289,8 +289,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( pallet_sudo::CheckOnlySudoAccount, frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, @@ -301,7 +301,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, @@ -316,6 +316,7 @@ mod benches { frame_benchmarking::define_benchmarks!( [cumulus_pallet_parachain_system, ParachainSystem] [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_glutton, Glutton] [pallet_message_queue, MessageQueue] [pallet_timestamp, Timestamp] @@ -332,7 +333,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -439,6 +440,7 @@ impl_runtime_apis! 
{ use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -455,6 +457,7 @@ impl_runtime_apis! { use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..e6efa76230823821019e593fda8c5e095e36d4d0 --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,119 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/ +// --chain=glutton-westend-dev-1300 + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_908_000 picoseconds. 
+ Weight::from_parts(4_007_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_510_000 picoseconds. + Weight::from_parts(6_332_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 651_000 picoseconds. + Weight::from_parts(851_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_387_000 picoseconds. + Weight::from_parts(3_646_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 491_000 picoseconds. + Weight::from_parts(651_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 451_000 picoseconds. + Weight::from_parts(662_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1489` + // Minimum execution time: 3_537_000 picoseconds. + Weight::from_parts(4_208_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml index c0b8fb7636b5898861c0b12912c58fbf0d43f924..c9781639c3e42889edb5b6e046fae7319551800b 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml @@ -152,6 +152,7 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 90c398917695a0ef1150e42c6b61430bbb6de783..c5e420785de54e908af65f2d4515d678b9b6f005 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -83,8 +83,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The TransactionExtension to the basic transaction logic. 
+pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -97,7 +97,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( @@ -126,7 +126,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("people-rococo"), impl_name: create_runtime_str!("people-rococo"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, @@ -178,6 +178,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = SS58Prefix; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = ConstU32<16>; @@ -232,6 +233,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -453,6 +455,7 @@ mod benches { [pallet_session, SessionBench::] [pallet_utility, Utility] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] // Polkadot [polkadot_runtime_common::identity_migrator, IdentityMigrator] // Cumulus @@ -495,7 +498,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -710,6 +713,13 @@ impl_runtime_apis! { fn reserve_transferable_asset_and_dest() -> Option<(Asset, Location)> { None } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } } use xcm::latest::prelude::*; diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/frame_system_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..1ba2010991fef1dddcc37fa3fccef25599278d82 --- /dev/null +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/frame_system_extensions.rs @@ -0,0 +1,121 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! 
HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-rococo/src/weights/ +// --chain=people-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_637_000 picoseconds. + Weight::from_parts(6_382_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. + Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 561_000 picoseconds. + Weight::from_parts(2_705_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_316_000 picoseconds. + Weight::from_parts(5_771_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 511_000 picoseconds. + Weight::from_parts(2_575_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 501_000 picoseconds. + Weight::from_parts(2_595_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_687_000 picoseconds. 
+ Weight::from_parts(6_192_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs index 3396a8caea0599574da40135c74bc19f9cf52125..c4cea99ee5aa4bede0ceac4cece3614497bd7134 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs @@ -20,6 +20,7 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_balances; pub mod pallet_collator_selection; pub mod pallet_identity; @@ -27,6 +28,7 @@ pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_transaction_payment.rs new file mode 100644 index 0000000000000000000000000000000000000000..555fd5a32fa874f3f046cbf819a2a4e938b4926b --- /dev/null +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-rococo/src/weights/ +// --chain=people-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. 
+pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 33_363_000 picoseconds. + Weight::from_parts(38_793_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs index 0f793524de9f5ef7da835090f9d81008ba756d59..fabce29b5fd9451f27364c38481d2dac98c430d8 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs @@ -1,40 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("people-kusama-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./artifacts/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=people-kusama-dev -// --execution=wasm -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./cumulus/parachains/runtimes/people/people-kusama/src/weights/pallet_xcm.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=people-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,57 +48,28 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 25_931_000 picoseconds. - Weight::from_parts(26_340_000, 0) + // Minimum execution time: 17_830_000 picoseconds. + Weight::from_parts(18_411_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: ParachainInfo ParachainId (r:1 w:0) - /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - fn teleport_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `1489` - // Minimum execution time: 25_691_000 picoseconds. 
- Weight::from_parts(25_971_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - } - /// Storage: Benchmark Override (r:0 w:0) - /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) - fn reserve_transfer_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -108,18 +80,38 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn teleport_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `70` + // Estimated: `3535` + // Minimum execution time: 55_456_000 picoseconds. + Weight::from_parts(56_808_000, 0) + .saturating_add(Weight::from_parts(0, 3535)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn reserve_transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `496` - // Estimated: `6208` - // Minimum execution time: 146_932_000 picoseconds. - Weight::from_parts(153_200_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(12)) - .saturating_add(T::DbWeight::get().writes(7)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. 
+ Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: Benchmark Override (r:0 w:0) - /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: // Measured: `0` @@ -128,189 +120,189 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: PolkadotXcm SupportedVersion (r:0 w:1) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_572_000 picoseconds. - Weight::from_parts(9_924_000, 0) + // Minimum execution time: 5_996_000 picoseconds. + Weight::from_parts(6_154_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: PolkadotXcm SafeXcmVersion (r:0 w:1) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:0 w:1) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn force_default_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_997_000 picoseconds. - Weight::from_parts(3_136_000, 0) + // Minimum execution time: 1_768_000 picoseconds. 
+ Weight::from_parts(1_914_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: PolkadotXcm VersionNotifiers (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm QueryCounter (r:1 w:1) - /// Proof Skipped: PolkadotXcm QueryCounter (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm Queries (r:0 w:1) - /// Proof Skipped: PolkadotXcm Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 30_271_000 picoseconds. - Weight::from_parts(30_819_000, 0) + // Minimum execution time: 24_120_000 picoseconds. 
+ Weight::from_parts(24_745_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: PolkadotXcm VersionNotifiers (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm Queries (r:0 w:1) - /// Proof Skipped: PolkadotXcm Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `220` - // Estimated: `3685` - // Minimum execution time: 32_302_000 picoseconds. - Weight::from_parts(32_807_000, 0) - .saturating_add(Weight::from_parts(0, 3685)) + // Measured: `255` + // Estimated: `3720` + // Minimum execution time: 26_630_000 picoseconds. + Weight::from_parts(27_289_000, 0) + .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: PolkadotXcm XcmExecutionSuspended (r:0 w:1) - /// Proof Skipped: PolkadotXcm XcmExecutionSuspended (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) + /// Proof: `PolkadotXcm::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn force_suspension() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_960_000 picoseconds. - Weight::from_parts(3_094_000, 0) + // Minimum execution time: 1_821_000 picoseconds. 
+ Weight::from_parts(1_946_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: PolkadotXcm SupportedVersion (r:4 w:2) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `95` - // Estimated: `10985` - // Minimum execution time: 14_877_000 picoseconds. - Weight::from_parts(15_296_000, 0) - .saturating_add(Weight::from_parts(0, 10985)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `89` + // Estimated: `13454` + // Minimum execution time: 16_586_000 picoseconds. + Weight::from_parts(16_977_000, 0) + .saturating_add(Weight::from_parts(0, 13454)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: PolkadotXcm VersionNotifiers (r:4 w:2) - /// Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `10989` - // Minimum execution time: 14_835_000 picoseconds. - Weight::from_parts(15_115_000, 0) - .saturating_add(Weight::from_parts(0, 10989)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `93` + // Estimated: `13458` + // Minimum execution time: 16_923_000 picoseconds. + Weight::from_parts(17_415_000, 0) + .saturating_add(Weight::from_parts(0, 13458)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:5 w:0) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `13471` - // Minimum execution time: 15_368_000 picoseconds. - Weight::from_parts(15_596_000, 0) - .saturating_add(Weight::from_parts(0, 13471)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15946` + // Minimum execution time: 18_596_000 picoseconds. 
+ Weight::from_parts(18_823_000, 0) + .saturating_add(Weight::from_parts(0, 15946)) + .saturating_add(T::DbWeight::get().reads(6)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:2 w:1) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 28_025_000 picoseconds. - Weight::from_parts(28_524_000, 0) + // Minimum execution time: 23_817_000 picoseconds. + Weight::from_parts(24_520_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:3 w:0) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `136` - // Estimated: `8551` - // Minimum execution time: 8_166_000 picoseconds. - Weight::from_parts(8_314_000, 0) - .saturating_add(Weight::from_parts(0, 8551)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `11026` + // Minimum execution time: 11_042_000 picoseconds. 
+ Weight::from_parts(11_578_000, 0) + .saturating_add(Weight::from_parts(0, 11026)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:4 w:2) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `10996` - // Minimum execution time: 14_871_000 picoseconds. - Weight::from_parts(15_374_000, 0) - .saturating_add(Weight::from_parts(0, 10996)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `100` + // Estimated: `13465` + // Minimum execution time: 17_306_000 picoseconds. + Weight::from_parts(17_817_000, 0) + .saturating_add(Weight::from_parts(0, 13465)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:4 w:2) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `112` - // Estimated: `11002` - // Minimum execution time: 33_611_000 picoseconds. - Weight::from_parts(34_008_000, 0) - .saturating_add(Weight::from_parts(0, 11002)) - .saturating_add(T::DbWeight::get().reads(9)) + // Measured: `106` + // Estimated: `13471` + // Minimum execution time: 32_141_000 picoseconds. 
+ Weight::from_parts(32_954_000, 0) + .saturating_add(Weight::from_parts(0, 13471)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -319,11 +311,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn new_query() -> Weight { // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `1588` - // Minimum execution time: 5_496_000 picoseconds. - Weight::from_parts(5_652_000, 0) - .saturating_add(Weight::from_parts(0, 1588)) + // Measured: `32` + // Estimated: `1517` + // Minimum execution time: 3_410_000 picoseconds. + Weight::from_parts(3_556_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -331,11 +323,23 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn take_response() -> Weight { // Proof Size summary in bytes: - // Measured: `7740` - // Estimated: `11205` - // Minimum execution time: 26_140_000 picoseconds. - Weight::from_parts(26_824_000, 0) - .saturating_add(Weight::from_parts(0, 11205)) + // Measured: `7669` + // Estimated: `11134` + // Minimum execution time: 25_021_000 picoseconds. + Weight::from_parts(25_240_000, 0) + .saturating_add(Weight::from_parts(0, 11134)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `90` + // Estimated: `3555` + // Minimum execution time: 33_801_000 picoseconds. + Weight::from_parts(34_655_000, 0) + .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml index e87e825a34e8d9ad8c7f41883f3cfbb826064b06..8d8421332c13d40c46636f7be01143617a9035a5 100644 --- a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml @@ -152,6 +152,7 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index a904f7c3521be8b7122eb36fc0a1baa053106b38..dea079a4a98ae6c5ee16aafd0d07f9e2b9dd34d5 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -83,8 +83,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The TransactionExtension to the basic transaction logic. 
+pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -97,7 +97,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( @@ -126,7 +126,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("people-westend"), impl_name: create_runtime_str!("people-westend"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, @@ -178,6 +178,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = SS58Prefix; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = ConstU32<16>; @@ -232,6 +233,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -495,7 +497,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -710,6 +712,13 @@ impl_runtime_apis! { fn reserve_transferable_asset_and_dest() -> Option<(Asset, Location)> { None } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } } use xcm::latest::prelude::*; diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/frame_system_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..7130a62ab6a9c95aaf4938eff3c8a7ca0ed96d40 --- /dev/null +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,121 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights/ +// --chain=people-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_637_000 picoseconds. + Weight::from_parts(6_382_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. + Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 561_000 picoseconds. + Weight::from_parts(2_705_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_316_000 picoseconds. + Weight::from_parts(5_771_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 511_000 picoseconds. + Weight::from_parts(2_575_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 501_000 picoseconds. + Weight::from_parts(2_595_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_687_000 picoseconds. 
+ Weight::from_parts(6_192_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs index 3396a8caea0599574da40135c74bc19f9cf52125..c4cea99ee5aa4bede0ceac4cece3614497bd7134 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs @@ -20,6 +20,7 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_balances; pub mod pallet_collator_selection; pub mod pallet_identity; @@ -27,6 +28,7 @@ pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_transaction_payment.rs new file mode 100644 index 0000000000000000000000000000000000000000..30e4524e586e24247f604e82c0ea974d43ff060c --- /dev/null +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights/ +// --chain=people-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. 
+pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 33_363_000 picoseconds. + Weight::from_parts(38_793_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs index d3b60471b850e0fffbad01915aaf461be3d48ae0..c337289243b748d721b60421bccf3349044ef194 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs @@ -1,40 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("people-polkadot-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-westend-dev")`, DB CACHE: 1024 // Executed Command: -// ./artifacts/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=people-polkadot-dev -// --execution=wasm -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./cumulus/parachains/runtimes/people/people-polkadot/src/weights/pallet_xcm.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=people-westend-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,57 +48,28 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 25_783_000 picoseconds. - Weight::from_parts(26_398_000, 0) + // Minimum execution time: 17_856_000 picoseconds. + Weight::from_parts(18_473_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: ParachainInfo ParachainId (r:1 w:0) - /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - fn teleport_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `1489` - // Minimum execution time: 25_511_000 picoseconds. 
- Weight::from_parts(26_120_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - } - /// Storage: Benchmark Override (r:0 w:0) - /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) - fn reserve_transfer_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -108,18 +80,38 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn teleport_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `70` + // Estimated: `3535` + // Minimum execution time: 56_112_000 picoseconds. + Weight::from_parts(57_287_000, 0) + .saturating_add(Weight::from_parts(0, 3535)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn reserve_transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `496` - // Estimated: `6208` - // Minimum execution time: 146_932_000 picoseconds. - Weight::from_parts(153_200_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(12)) - .saturating_add(T::DbWeight::get().writes(7)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. 
+ Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: Benchmark Override (r:0 w:0) - /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: // Measured: `0` @@ -128,189 +120,189 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: PolkadotXcm SupportedVersion (r:0 w:1) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_707_000 picoseconds. - Weight::from_parts(9_874_000, 0) + // Minimum execution time: 6_186_000 picoseconds. + Weight::from_parts(6_420_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: PolkadotXcm SafeXcmVersion (r:0 w:1) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:0 w:1) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn force_default_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_073_000 picoseconds. - Weight::from_parts(3_183_000, 0) + // Minimum execution time: 1_824_000 picoseconds. 
+ Weight::from_parts(1_999_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: PolkadotXcm VersionNotifiers (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm QueryCounter (r:1 w:1) - /// Proof Skipped: PolkadotXcm QueryCounter (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm Queries (r:0 w:1) - /// Proof Skipped: PolkadotXcm Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 30_999_000 picoseconds. - Weight::from_parts(31_641_000, 0) + // Minimum execution time: 23_833_000 picoseconds. 
+ Weight::from_parts(24_636_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: PolkadotXcm VersionNotifiers (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm Queries (r:0 w:1) - /// Proof Skipped: PolkadotXcm Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `220` - // Estimated: `3685` - // Minimum execution time: 33_036_000 picoseconds. - Weight::from_parts(33_596_000, 0) - .saturating_add(Weight::from_parts(0, 3685)) + // Measured: `255` + // Estimated: `3720` + // Minimum execution time: 26_557_000 picoseconds. + Weight::from_parts(27_275_000, 0) + .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: PolkadotXcm XcmExecutionSuspended (r:0 w:1) - /// Proof Skipped: PolkadotXcm XcmExecutionSuspended (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) + /// Proof: `PolkadotXcm::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn force_suspension() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_035_000 picoseconds. - Weight::from_parts(3_154_000, 0) + // Minimum execution time: 1_921_000 picoseconds. 
+ Weight::from_parts(2_040_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: PolkadotXcm SupportedVersion (r:4 w:2) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `95` - // Estimated: `10985` - // Minimum execution time: 14_805_000 picoseconds. - Weight::from_parts(15_120_000, 0) - .saturating_add(Weight::from_parts(0, 10985)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `89` + // Estimated: `13454` + // Minimum execution time: 16_832_000 picoseconds. + Weight::from_parts(17_312_000, 0) + .saturating_add(Weight::from_parts(0, 13454)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: PolkadotXcm VersionNotifiers (r:4 w:2) - /// Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `10989` - // Minimum execution time: 14_572_000 picoseconds. - Weight::from_parts(14_909_000, 0) - .saturating_add(Weight::from_parts(0, 10989)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `93` + // Estimated: `13458` + // Minimum execution time: 16_687_000 picoseconds. + Weight::from_parts(17_123_000, 0) + .saturating_add(Weight::from_parts(0, 13458)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:5 w:0) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `13471` - // Minimum execution time: 15_341_000 picoseconds. - Weight::from_parts(15_708_000, 0) - .saturating_add(Weight::from_parts(0, 13471)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15946` + // Minimum execution time: 18_164_000 picoseconds. 
+ Weight::from_parts(18_580_000, 0) + .saturating_add(Weight::from_parts(0, 15946)) + .saturating_add(T::DbWeight::get().reads(6)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:2 w:1) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 27_840_000 picoseconds. - Weight::from_parts(28_248_000, 0) + // Minimum execution time: 23_577_000 picoseconds. + Weight::from_parts(24_324_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:3 w:0) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `136` - // Estimated: `8551` - // Minimum execution time: 8_245_000 picoseconds. - Weight::from_parts(8_523_000, 0) - .saturating_add(Weight::from_parts(0, 8551)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `11026` + // Minimum execution time: 11_014_000 picoseconds. 
+ Weight::from_parts(11_223_000, 0) + .saturating_add(Weight::from_parts(0, 11026)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:4 w:2) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `10996` - // Minimum execution time: 14_780_000 picoseconds. - Weight::from_parts(15_173_000, 0) - .saturating_add(Weight::from_parts(0, 10996)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `100` + // Estimated: `13465` + // Minimum execution time: 16_887_000 picoseconds. + Weight::from_parts(17_361_000, 0) + .saturating_add(Weight::from_parts(0, 13465)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:4 w:2) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `112` - // Estimated: `11002` - // Minimum execution time: 33_422_000 picoseconds. - Weight::from_parts(34_076_000, 0) - .saturating_add(Weight::from_parts(0, 11002)) - .saturating_add(T::DbWeight::get().reads(9)) + // Measured: `106` + // Estimated: `13471` + // Minimum execution time: 31_705_000 picoseconds. 
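// A minimal illustrative note on how these regenerated weights are consumed:
// `pallet_xcm::Config` exposes a `WeightInfo` associated type, and the
// conventional binding in these runtimes is assumed to be
//
//     type WeightInfo = crate::weights::pallet_xcm::WeightInfo<Runtime>;
//
// (the `<Runtime>` argument was stripped from this copy of the diff).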
+ Weight::from_parts(32_166_000, 0) + .saturating_add(Weight::from_parts(0, 13471)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -319,11 +311,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn new_query() -> Weight { // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `1588` - // Minimum execution time: 5_496_000 picoseconds. - Weight::from_parts(5_652_000, 0) - .saturating_add(Weight::from_parts(0, 1588)) + // Measured: `32` + // Estimated: `1517` + // Minimum execution time: 3_568_000 picoseconds. + Weight::from_parts(3_669_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -331,11 +323,23 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn take_response() -> Weight { // Proof Size summary in bytes: - // Measured: `7740` - // Estimated: `11205` - // Minimum execution time: 26_140_000 picoseconds. - Weight::from_parts(26_824_000, 0) - .saturating_add(Weight::from_parts(0, 11205)) + // Measured: `7669` + // Estimated: `11134` + // Minimum execution time: 24_823_000 picoseconds. + Weight::from_parts(25_344_000, 0) + .saturating_add(Weight::from_parts(0, 11134)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `90` + // Estimated: `3555` + // Minimum execution time: 34_516_000 picoseconds. + Weight::from_parts(35_478_000, 0) + .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs index ba077ef887947f6f0f2b639f85c10fcb13a06fcb..023c997830238051f3521932d39e2a13fe095d19 100644 --- a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs @@ -258,8 +258,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckSpecVersion, frame_system::CheckTxVersion, frame_system::CheckGenesis, @@ -269,7 +269,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< @@ -300,7 +300,7 @@ impl_runtime_apis! 
{ Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/cumulus/parachains/runtimes/starters/shell/src/lib.rs b/cumulus/parachains/runtimes/starters/shell/src/lib.rs index 457394760d989db4dcd5ba335e1ff2dad46eaaba..fedac36e48d6452ef2b13e3dd88fa4f17a18516b 100644 --- a/cumulus/parachains/runtimes/starters/shell/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/shell/src/lib.rs @@ -34,15 +34,19 @@ pub mod xcm_config; use codec::{Decode, Encode}; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use cumulus_primitives_core::AggregateMessageOrigin; -use frame_support::unsigned::TransactionValidityError; use scale_info::TypeInfo; use sp_api::impl_runtime_apis; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, DispatchInfoOf}, - transaction_validity::{TransactionSource, TransactionValidity}, + traits::{ + AccountIdLookup, BlakeTwo256, Block as BlockT, DispatchInfoOf, OriginOf, + TransactionExtension, TransactionExtensionBase, ValidateResult, + }, + transaction_validity::{ + InvalidTransaction, TransactionSource, TransactionValidity, TransactionValidityError, + }, ApplyExtrinsicResult, }; use sp_std::prelude::*; @@ -275,35 +279,37 @@ construct_runtime! { /// Simple implementation which fails any transaction which is signed. #[derive(Eq, PartialEq, Clone, Default, sp_core::RuntimeDebug, Encode, Decode, TypeInfo)] pub struct DisallowSigned; -impl sp_runtime::traits::SignedExtension for DisallowSigned { + +impl TransactionExtensionBase for DisallowSigned { const IDENTIFIER: &'static str = "DisallowSigned"; - type AccountId = AccountId; - type Call = RuntimeCall; - type AdditionalSigned = (); + type Implicit = (); +} + +impl TransactionExtension for DisallowSigned { + type Val = (); type Pre = (); - fn additional_signed( + fn validate( &self, - ) -> sp_std::result::Result<(), sp_runtime::transaction_validity::TransactionValidityError> { - Ok(()) + _origin: OriginOf, + _call: &RuntimeCall, + _info: &DispatchInfoOf, + _len: usize, + _context: &mut C, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> ValidateResult { + Err(InvalidTransaction::BadProof.into()) } - fn pre_dispatch( + fn prepare( self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) - } - fn validate( - &self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &sp_runtime::traits::DispatchInfoOf, + _val: Self::Val, + _origin: &OriginOf, + _call: &RuntimeCall, + _info: &DispatchInfoOf, _len: usize, - ) -> TransactionValidity { - let i = sp_runtime::transaction_validity::InvalidTransaction::BadProof; - Err(sp_runtime::transaction_validity::TransactionValidityError::Invalid(i)) + _context: &C, + ) -> Result { + Err(InvalidTransaction::BadProof.into()) } } @@ -323,11 +329,11 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = DisallowSigned; +/// The extension to the basic transaction logic. 
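// As the `DisallowSigned` impl above illustrates, the old `SignedExtension`
// surface is now split in two: `TransactionExtensionBase` carries `IDENTIFIER`
// and `Implicit` (the replacement for `AdditionalSigned`), while
// `TransactionExtension` carries `Val`/`Pre` together with `validate` and
// `prepare` (the replacements for the old `validate`/`pre_dispatch`), with
// `validate` now receiving the origin and the implicit data. `DisallowSigned`
// answers both with `InvalidTransaction::BadProof`, so any signed transaction
// is rejected, as the type's doc comment states.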
+pub type TxExtension = DisallowSigned; /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, @@ -357,7 +363,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml index 08e5987d43afd5cf2676e56dee79afa04362ada0..bca6aca051f80639c1e8cef0cf4548e0bf15a4ee 100644 --- a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml @@ -158,6 +158,7 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "parachains-common/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index bf8dcbc24c8d51d61c70ea6a56fa8865ea32c893..62065619e42f3cb686877cab5033359d97cd7101 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -114,8 +114,8 @@ pub type BlockId = generic::BlockId; // Id used for identifying assets. pub type AssetId = u32; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -128,7 +128,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; pub type Migrations = ( pallet_balances::migration::MigrateToTrackInactive, @@ -419,6 +419,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = (); } parameter_types! 
{ @@ -608,6 +609,19 @@ impl pallet_collator_selection::Config for Runtime { type WeightInfo = (); } +#[cfg(feature = "runtime-benchmarks")] +pub struct AssetTxHelper; + +#[cfg(feature = "runtime-benchmarks")] +impl pallet_asset_tx_payment::BenchmarkHelperTrait for AssetTxHelper { + fn create_asset_id_parameter(_id: u32) -> (u32, u32) { + unimplemented!("Penpal uses default weights"); + } + fn setup_balances_and_pool(_asset_id: u32, _account: AccountId) { + unimplemented!("Penpal uses default weights"); + } +} + impl pallet_asset_tx_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Fungibles = Assets; @@ -620,6 +634,9 @@ impl pallet_asset_tx_payment::Config for Runtime { >, AssetsToBlockAuthor, >; + type WeightInfo = (); + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = AssetTxHelper; } impl pallet_sudo::Config for Runtime { @@ -668,6 +685,7 @@ construct_runtime!( mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_session, SessionBench::] @@ -699,7 +717,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -851,6 +869,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; let mut list = Vec::::new(); @@ -867,6 +886,7 @@ impl_runtime_apis! 
{ use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime {} use cumulus_pallet_session_benchmarking::Pallet as SessionBench; diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml index 42169e8949f553e1920508e92a059e5b76818099..2023d9abf5d4b4ca2be7d5dd0aa9368065833ae1 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml @@ -126,6 +126,7 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "parachains-common/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index 57969d9a4f1882d218fdc97374f52f467f109398..469f06a1aa6fe42c08532aab003afbace08af504 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -107,7 +107,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("test-parachain"), impl_name: create_runtime_str!("test-parachain"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, @@ -267,6 +267,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = (); type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = (); } impl pallet_sudo::Config for Runtime { @@ -644,8 +645,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -657,7 +658,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, @@ -689,7 +690,7 @@ impl_runtime_apis! 
{ Executive::execute_block(block); } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index 84a232e954fc74a3dd4aef36750ef311794e72e5..a92e8a4c12ae721dc7c6c06b3f705dcedb87e2d3 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -136,6 +136,7 @@ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "glutton-westend-runtime/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "parachains-common/runtime-benchmarks", "penpal-runtime/runtime-benchmarks", "people-rococo-runtime/runtime-benchmarks", diff --git a/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs b/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs index 76dd7347ccbc35127747e144af7c8705a15022e4..7778d1bf7d2dc0187fe6a0684023a9a4648be596 100644 --- a/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs +++ b/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs @@ -39,7 +39,7 @@ sp_api::impl_runtime_apis! { unimplemented!() } - fn initialize_block(_: &::Header) { + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs b/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs index 0f01b85ebcf6fa63aabd9115c2ef553c18badea8..880f5d760c74559db87597bce158f8f5e62dbd82 100644 --- a/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs +++ b/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs @@ -39,7 +39,7 @@ sp_api::impl_runtime_apis! 
{ unimplemented!() } - fn initialize_block(_: &::Header) { + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index 4e06cd38f1d7fc7f4c7bf861162a1c8cf5390ec2..386551102e43617defd674c99831b99464c12bcc 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -1251,28 +1251,33 @@ where <::Pair as Pair>::Signature: TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, { - let client2 = client.clone(); + let verifier_client = client.clone(); let aura_verifier = move || { - let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client2).unwrap(); - Box::new(cumulus_client_consensus_aura::build_verifier::< ::Pair, _, _, _, >(cumulus_client_consensus_aura::BuildVerifierParams { - client: client2.clone(), - create_inherent_data_providers: move |_, _| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - slot_duration, - ); - - Ok((slot, timestamp)) + client: verifier_client.clone(), + create_inherent_data_providers: move |parent_hash, _| { + let cidp_client = verifier_client.clone(); + async move { + let slot_duration = cumulus_client_consensus_aura::slot_duration_at( + &*cidp_client, + parent_hash, + )?; + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = + sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( + *timestamp, + slot_duration, + ); + + Ok((slot, timestamp)) + } }, telemetry: telemetry_handle, })) as Box<_> diff --git a/cumulus/primitives/storage-weight-reclaim/Cargo.toml b/cumulus/primitives/storage-weight-reclaim/Cargo.toml index 4835fb5192b88165c1e7912862492086ddd0853e..9e8f60783d4dd1e4779e93af3f932b4034a1937a 100644 --- a/cumulus/primitives/storage-weight-reclaim/Cargo.toml +++ b/cumulus/primitives/storage-weight-reclaim/Cargo.toml @@ -14,9 +14,12 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = log = { workspace = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-support = { path = "../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../substrate/frame/system", default-features = false } +sp-core = { path = "../../../substrate/primitives/core", default-features = false } +sp-io = { path = "../../../substrate/primitives/io", default-features = false } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } sp-std = { path = "../../../substrate/primitives/std", default-features = false } @@ -26,19 +29,27 @@ docify = "0.2.7" [dev-dependencies] sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } cumulus-test-runtime = { path = "../../test/runtime" } [features] default = ["std"] +runtime-benchmarks = [ + "cumulus-primitives-core/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] std = [ "codec/std", "cumulus-primitives-core/std", 
"cumulus-primitives-proof-size-hostfunction/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "log/std", "scale-info/std", + "sp-core/std", "sp-io/std", "sp-runtime/std", "sp-std/std", diff --git a/cumulus/primitives/storage-weight-reclaim/src/benchmarking.rs b/cumulus/primitives/storage-weight-reclaim/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..2980d08271a2c8a50c338c84162fe4fe38a55c2e --- /dev/null +++ b/cumulus/primitives/storage-weight-reclaim/src/benchmarking.rs @@ -0,0 +1,70 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarking setup for cumulus-primitives-storage-weight-reclaim + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; + +use frame_benchmarking::{account, v2::*, BenchmarkError}; +use frame_support::pallet_prelude::DispatchClass; +use frame_system::{BlockWeight, RawOrigin}; +use sp_runtime::traits::{DispatchTransaction, Get}; +use sp_std::{ + marker::{Send, Sync}, + prelude::*, +}; + +/// Pallet we're benchmarking here. +pub struct Pallet(frame_system::Pallet); + +#[benchmarks(where + T: Send + Sync, + T::RuntimeCall: Dispatchable +)] +mod benchmarks { + use super::*; + + #[benchmark] + fn storage_weight_reclaim() -> Result<(), BenchmarkError> { + let caller = account("caller", 0, 0); + BlockWeight::::mutate(|current_weight| { + current_weight.set(Weight::from_parts(0, 1000), DispatchClass::Normal); + }); + let base_extrinsic = ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic; + let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(0, 200)), + pays_fee: Default::default(), + }; + let len = 0_usize; + let ext = StorageWeightReclaim::::new(); + + #[block] + { + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| Ok(post_info)) + .unwrap() + .unwrap(); + } + + assert_eq!(BlockWeight::::get().total().proof_size(), 700 + base_extrinsic.proof_size()); + + Ok(()) + } +} diff --git a/cumulus/primitives/storage-weight-reclaim/src/lib.rs b/cumulus/primitives/storage-weight-reclaim/src/lib.rs index 5dddc92e395574dbb36538f3799c0d21b22a3939..4bb40176b78c012ccb37c1bc0f068ff35c63731c 100644 --- a/cumulus/primitives/storage-weight-reclaim/src/lib.rs +++ b/cumulus/primitives/storage-weight-reclaim/src/lib.rs @@ -29,12 +29,22 @@ use frame_support::{ use frame_system::Config; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension}, + impl_tx_ext_default, + traits::{ + DispatchInfoOf, Dispatchable, PostDispatchInfoOf, TransactionExtension, + TransactionExtensionBase, + }, transaction_validity::TransactionValidityError, DispatchResult, }; use sp_std::marker::PhantomData; +#[cfg(test)] +mod tests; + +#[cfg(feature = 
"runtime-benchmarks")] +mod benchmarking; + const LOG_TARGET: &'static str = "runtime::storage_reclaim"; /// `StorageWeightReclaimer` is a mechanism for manually reclaiming storage weight. @@ -43,7 +53,7 @@ const LOG_TARGET: &'static str = "runtime::storage_reclaim"; /// reclaim it computes the real consumed storage weight and refunds excess weight. /// /// # Example -#[doc = docify::embed!("src/lib.rs", simple_reclaimer_example)] +#[doc = docify::embed!("src/tests.rs", simple_reclaimer_example)] pub struct StorageWeightReclaimer { previous_remaining_proof_size: u64, previous_reported_proof_size: Option, @@ -119,42 +129,40 @@ impl core::fmt::Debug for StorageWeightReclaim { } } -impl SignedExtension for StorageWeightReclaim +impl TransactionExtensionBase for StorageWeightReclaim { + const IDENTIFIER: &'static str = "StorageWeightReclaim"; + type Implicit = (); +} + +impl TransactionExtension + for StorageWeightReclaim where T::RuntimeCall: Dispatchable, { - const IDENTIFIER: &'static str = "StorageWeightReclaim"; - - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); + type Val = (); type Pre = Option; - fn additional_signed( - &self, - ) -> Result - { - Ok(()) - } - - fn pre_dispatch( + fn prepare( self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &sp_runtime::traits::DispatchInfoOf, + _val: Self::Val, + _origin: &T::RuntimeOrigin, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, _len: usize, - ) -> Result { + _context: &Context, + ) -> Result { Ok(get_proof_size()) } fn post_dispatch( - pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, _len: usize, _result: &DispatchResult, + _context: &Context, ) -> Result<(), TransactionValidityError> { - let Some(Some(pre_dispatch_proof_size)) = pre else { + let Some(pre_dispatch_proof_size) = pre else { return Ok(()); }; @@ -194,470 +202,6 @@ where }); Ok(()) } -} - -#[cfg(test)] -mod tests { - use super::*; - use frame_support::{ - assert_ok, - dispatch::DispatchClass, - weights::{Weight, WeightMeter}, - }; - use frame_system::{BlockWeight, CheckWeight}; - use sp_runtime::{AccountId32, BuildStorage}; - use sp_std::marker::PhantomData; - use sp_trie::proof_size_extension::ProofSizeExt; - - type Test = cumulus_test_runtime::Runtime; - const CALL: &::RuntimeCall = - &cumulus_test_runtime::RuntimeCall::System(frame_system::Call::set_heap_pages { - pages: 0u64, - }); - const ALICE: AccountId32 = AccountId32::new([1u8; 32]); - const LEN: usize = 0; - - pub fn new_test_ext() -> sp_io::TestExternalities { - let ext: sp_io::TestExternalities = cumulus_test_runtime::RuntimeGenesisConfig::default() - .build_storage() - .unwrap() - .into(); - ext - } - - struct TestRecorder { - return_values: Box<[usize]>, - counter: std::sync::atomic::AtomicUsize, - } - - impl TestRecorder { - fn new(values: &[usize]) -> Self { - TestRecorder { return_values: values.into(), counter: Default::default() } - } - } - - impl sp_trie::ProofSizeProvider for TestRecorder { - fn estimate_encoded_size(&self) -> usize { - let counter = self.counter.fetch_add(1, core::sync::atomic::Ordering::Relaxed); - self.return_values[counter] - } - } - - fn setup_test_externalities(proof_values: &[usize]) -> sp_io::TestExternalities { - let mut test_ext = new_test_ext(); - let test_recorder = TestRecorder::new(proof_values); - test_ext.register_extension(ProofSizeExt::new(test_recorder)); - test_ext - } - - fn set_current_storage_weight(new_weight: 
u64) { - BlockWeight::::mutate(|current_weight| { - current_weight.set(Weight::from_parts(0, new_weight), DispatchClass::Normal); - }); - } - - #[test] - fn basic_refund() { - // The real cost will be 100 bytes of storage size - let mut test_ext = setup_test_externalities(&[0, 100]); - - test_ext.execute_with(|| { - set_current_storage_weight(1000); - - // Benchmarked storage weight: 500 - let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; - let post_info = PostDispatchInfo::default(); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(0)); - - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - // We expect a refund of 400 - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - assert_eq!(BlockWeight::::get().total().proof_size(), 600); - }) - } - - #[test] - fn does_nothing_without_extension() { - let mut test_ext = new_test_ext(); - - // Proof size extension not registered - test_ext.execute_with(|| { - set_current_storage_weight(1000); - - // Benchmarked storage weight: 500 - let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; - let post_info = PostDispatchInfo::default(); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, None); - - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - assert_eq!(BlockWeight::::get().total().proof_size(), 1000); - }) - } - - #[test] - fn negative_refund_is_added_to_weight() { - let mut test_ext = setup_test_externalities(&[100, 300]); - - test_ext.execute_with(|| { - set_current_storage_weight(1000); - // Benchmarked storage weight: 100 - let info = DispatchInfo { weight: Weight::from_parts(0, 100), ..Default::default() }; - let post_info = PostDispatchInfo::default(); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(100)); - - // We expect no refund - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - assert_eq!(BlockWeight::::get().total().proof_size(), 1100); - }) - } - - #[test] - fn test_zero_proof_size() { - let mut test_ext = setup_test_externalities(&[0, 0]); - - test_ext.execute_with(|| { - let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; - let post_info = PostDispatchInfo::default(); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(0)); - - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - assert_eq!(BlockWeight::::get().total().proof_size(), 0); - }); - } - - #[test] - fn test_larger_pre_dispatch_proof_size() { - let mut test_ext = setup_test_externalities(&[300, 100]); - - test_ext.execute_with(|| { - set_current_storage_weight(1300); - - let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; - let post_info = PostDispatchInfo::default(); - - let pre = StorageWeightReclaim::(PhantomData) - 
.pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(300)); - - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - assert_eq!(BlockWeight::::get().total().proof_size(), 800); - }); - } - - #[test] - fn test_incorporates_check_weight_unspent_weight() { - let mut test_ext = setup_test_externalities(&[100, 300]); - - test_ext.execute_with(|| { - set_current_storage_weight(1000); - - // Benchmarked storage weight: 300 - let info = DispatchInfo { weight: Weight::from_parts(100, 300), ..Default::default() }; - - // Actual weight is 50 - let post_info = PostDispatchInfo { - actual_weight: Some(Weight::from_parts(50, 250)), - pays_fee: Default::default(), - }; - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(100)); - - // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` - // we always need to call `post_dispatch` to verify that they interoperate correctly. - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - assert_eq!(BlockWeight::::get().total().proof_size(), 900); - }) - } - - #[test] - fn test_incorporates_check_weight_unspent_weight_on_negative() { - let mut test_ext = setup_test_externalities(&[100, 300]); - - test_ext.execute_with(|| { - set_current_storage_weight(1000); - // Benchmarked storage weight: 50 - let info = DispatchInfo { weight: Weight::from_parts(100, 50), ..Default::default() }; - - // Actual weight is 25 - let post_info = PostDispatchInfo { - actual_weight: Some(Weight::from_parts(50, 25)), - pays_fee: Default::default(), - }; - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(100)); - - // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` - // we always need to call `post_dispatch` to verify that they interoperate correctly. - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - assert_eq!(BlockWeight::::get().total().proof_size(), 1150); - }) - } - - #[test] - fn test_incorporates_check_weight_unspent_weight_reverse_order() { - let mut test_ext = setup_test_externalities(&[100, 300]); - - test_ext.execute_with(|| { - set_current_storage_weight(1000); - - // Benchmarked storage weight: 300 - let info = DispatchInfo { weight: Weight::from_parts(100, 300), ..Default::default() }; - - // Actual weight is 50 - let post_info = PostDispatchInfo { - actual_weight: Some(Weight::from_parts(50, 250)), - pays_fee: Default::default(), - }; - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(100)); - - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - // `CheckWeight` gets called after `StorageWeightReclaim` this time. - // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` - // we always need to call `post_dispatch` to verify that they interoperate correctly. 
- assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - - assert_eq!(BlockWeight::::get().total().proof_size(), 900); - }) - } - - #[test] - fn test_incorporates_check_weight_unspent_weight_on_negative_reverse_order() { - let mut test_ext = setup_test_externalities(&[100, 300]); - - test_ext.execute_with(|| { - set_current_storage_weight(1000); - // Benchmarked storage weight: 50 - let info = DispatchInfo { weight: Weight::from_parts(100, 50), ..Default::default() }; - // Actual weight is 25 - let post_info = PostDispatchInfo { - actual_weight: Some(Weight::from_parts(50, 25)), - pays_fee: Default::default(), - }; - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(100)); - - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - // `CheckWeight` gets called after `StorageWeightReclaim` this time. - // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` - // we always need to call `post_dispatch` to verify that they interoperate correctly. - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - - assert_eq!(BlockWeight::::get().total().proof_size(), 1150); - }) - } - - #[test] - fn storage_size_reported_correctly() { - let mut test_ext = setup_test_externalities(&[1000]); - test_ext.execute_with(|| { - assert_eq!(get_proof_size(), Some(1000)); - }); - - let mut test_ext = new_test_ext(); - - let test_recorder = TestRecorder::new(&[0]); - - test_ext.register_extension(ProofSizeExt::new(test_recorder)); - - test_ext.execute_with(|| { - assert_eq!(get_proof_size(), Some(0)); - }); - } - - #[test] - fn storage_size_disabled_reported_correctly() { - let mut test_ext = setup_test_externalities(&[PROOF_RECORDING_DISABLED as usize]); - - test_ext.execute_with(|| { - assert_eq!(get_proof_size(), None); - }); - } - - #[test] - fn test_reclaim_helper() { - let mut test_ext = setup_test_externalities(&[1000, 1300, 1800]); - - test_ext.execute_with(|| { - let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 2000)); - let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); - remaining_weight_meter.consume(Weight::from_parts(0, 500)); - let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); - - assert_eq!(reclaimed, Some(Weight::from_parts(0, 200))); - - remaining_weight_meter.consume(Weight::from_parts(0, 800)); - let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); - assert_eq!(reclaimed, Some(Weight::from_parts(0, 300))); - assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(0, 1200)); - }); - } - - #[test] - fn test_reclaim_helper_does_not_reclaim_negative() { - // Benchmarked weight does not change at all - let mut test_ext = setup_test_externalities(&[1000, 1300]); - - test_ext.execute_with(|| { - let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 1000)); - let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); - let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); - - assert_eq!(reclaimed, Some(Weight::from_parts(0, 0))); - assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(0, 1000)); - }); - - // Benchmarked weight increases less than storage proof consumes - let mut test_ext = setup_test_externalities(&[1000, 1300]); - - test_ext.execute_with(|| { - let mut remaining_weight_meter 
= WeightMeter::with_limit(Weight::from_parts(0, 1000)); - let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); - remaining_weight_meter.consume(Weight::from_parts(0, 0)); - let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); - - assert_eq!(reclaimed, Some(Weight::from_parts(0, 0))); - }); - } - - /// Just here for doc purposes - fn get_benched_weight() -> Weight { - Weight::from_parts(0, 5) - } - - /// Just here for doc purposes - fn do_work() {} - - #[docify::export_content(simple_reclaimer_example)] - fn reclaim_with_weight_meter() { - let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(10, 10)); - - let benched_weight = get_benched_weight(); - - // It is important to instantiate the `StorageWeightReclaimer` before we consume the weight - // for a piece of work from the weight meter. - let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); - - if remaining_weight_meter.try_consume(benched_weight).is_ok() { - // Perform some work that takes has `benched_weight` storage weight. - do_work(); - - // Reclaimer will detect that we only consumed 2 bytes, so 3 bytes are reclaimed. - let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); - - // We reclaimed 3 bytes of storage size! - assert_eq!(reclaimed, Some(Weight::from_parts(0, 3))); - assert_eq!(BlockWeight::::get().total().proof_size(), 10); - assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(10, 8)); - } - } - - #[test] - fn test_reclaim_helper_works_with_meter() { - // The node will report 12 - 10 = 2 consumed storage size between the calls. - let mut test_ext = setup_test_externalities(&[10, 12]); - - test_ext.execute_with(|| { - // Initial storage size is 10. - set_current_storage_weight(10); - reclaim_with_weight_meter(); - }); - } + impl_tx_ext_default!(T::RuntimeCall; Context; validate); } diff --git a/cumulus/primitives/storage-weight-reclaim/src/tests.rs b/cumulus/primitives/storage-weight-reclaim/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..631827cf442622ab2e4f18ee05ac3cac440e8077 --- /dev/null +++ b/cumulus/primitives/storage-weight-reclaim/src/tests.rs @@ -0,0 +1,484 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
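Before the tests themselves, it may help to spell out the refund rule they assert on. The following is a minimal standalone sketch of that arithmetic (plain integers only, not the extension's actual implementation); the numbers mirror `basic_refund` and `negative_refund_is_added_to_weight` below.

```rust
// Simplified model: `benchmarked` is the proof size claimed by `DispatchInfo`,
// `pre`/`post` are the node-reported proof sizes before and after dispatch,
// and `block` is the proof size already accumulated in `BlockWeight`.
fn expected_block_proof_size(block: u64, benchmarked: u64, pre: u64, post: u64) -> u64 {
    let consumed = post.saturating_sub(pre);
    if consumed > benchmarked {
        // The benchmark underestimated the real usage: add the difference on top.
        block + (consumed - benchmarked)
    } else {
        // The benchmark overestimated the real usage: refund the difference.
        block - (benchmarked - consumed)
    }
}

fn main() {
    // Mirrors `basic_refund`: 1000 - (500 - 100) = 600.
    assert_eq!(expected_block_proof_size(1000, 500, 0, 100), 600);
    // Mirrors `negative_refund_is_added_to_weight`: 1000 + (200 - 100) = 1100.
    assert_eq!(expected_block_proof_size(1000, 100, 100, 300), 1100);
}
```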
+ +use super::*; +use frame_support::{ + assert_ok, + dispatch::DispatchClass, + weights::{Weight, WeightMeter}, +}; +use frame_system::{BlockWeight, CheckWeight}; +use sp_runtime::{traits::DispatchTransaction, AccountId32, BuildStorage}; +use sp_std::marker::PhantomData; +use sp_trie::proof_size_extension::ProofSizeExt; + +type Test = cumulus_test_runtime::Runtime; +const CALL: &::RuntimeCall = + &cumulus_test_runtime::RuntimeCall::System(frame_system::Call::set_heap_pages { pages: 0u64 }); +const ALICE: AccountId32 = AccountId32::new([1u8; 32]); +const LEN: usize = 0; + +fn new_test_ext() -> sp_io::TestExternalities { + let ext: sp_io::TestExternalities = cumulus_test_runtime::RuntimeGenesisConfig::default() + .build_storage() + .unwrap() + .into(); + ext +} + +struct TestRecorder { + return_values: Box<[usize]>, + counter: std::sync::atomic::AtomicUsize, +} + +impl TestRecorder { + fn new(values: &[usize]) -> Self { + TestRecorder { return_values: values.into(), counter: Default::default() } + } +} + +impl sp_trie::ProofSizeProvider for TestRecorder { + fn estimate_encoded_size(&self) -> usize { + let counter = self.counter.fetch_add(1, core::sync::atomic::Ordering::Relaxed); + self.return_values[counter] + } +} + +fn setup_test_externalities(proof_values: &[usize]) -> sp_io::TestExternalities { + let mut test_ext = new_test_ext(); + let test_recorder = TestRecorder::new(proof_values); + test_ext.register_extension(ProofSizeExt::new(test_recorder)); + test_ext +} + +fn set_current_storage_weight(new_weight: u64) { + BlockWeight::::mutate(|current_weight| { + current_weight.set(Weight::from_parts(0, new_weight), DispatchClass::Normal); + }); +} + +#[test] +fn basic_refund() { + // The real cost will be 100 bytes of storage size + let mut test_ext = setup_test_externalities(&[0, 100]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + + // Benchmarked storage weight: 500 + let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(0)); + + assert_ok!(CheckWeight::::post_dispatch((), &info, &post_info, 0, &Ok(()), &())); + // We expect a refund of 400 + assert_ok!(StorageWeightReclaim::::post_dispatch( + pre, + &info, + &post_info, + LEN, + &Ok(()), + &() + )); + + assert_eq!(BlockWeight::::get().total().proof_size(), 600); + }) +} + +#[test] +fn does_nothing_without_extension() { + let mut test_ext = new_test_ext(); + + // Proof size extension not registered + test_ext.execute_with(|| { + set_current_storage_weight(1000); + + // Benchmarked storage weight: 500 + let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, None); + + assert_ok!(CheckWeight::::post_dispatch((), &info, &post_info, 0, &Ok(()), &())); + assert_ok!(StorageWeightReclaim::::post_dispatch( + pre, + &info, + &post_info, + LEN, + &Ok(()), + &() + )); + + assert_eq!(BlockWeight::::get().total().proof_size(), 1000); + }) +} + +#[test] +fn negative_refund_is_added_to_weight() { + let mut test_ext = setup_test_externalities(&[100, 300]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + // Benchmarked storage weight: 100 + let 
info = DispatchInfo { weight: Weight::from_parts(0, 100), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(100)); + + // We expect no refund + assert_ok!(CheckWeight::::post_dispatch((), &info, &post_info, 0, &Ok(()), &())); + assert_ok!(StorageWeightReclaim::::post_dispatch( + pre, + &info, + &post_info, + LEN, + &Ok(()), + &() + )); + + assert_eq!(BlockWeight::::get().total().proof_size(), 1100); + }) +} + +#[test] +fn test_zero_proof_size() { + let mut test_ext = setup_test_externalities(&[0, 0]); + + test_ext.execute_with(|| { + let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(0)); + + assert_ok!(CheckWeight::::post_dispatch((), &info, &post_info, 0, &Ok(()), &())); + assert_ok!(StorageWeightReclaim::::post_dispatch( + pre, + &info, + &post_info, + LEN, + &Ok(()), + &() + )); + + assert_eq!(BlockWeight::::get().total().proof_size(), 0); + }); +} + +#[test] +fn test_larger_pre_dispatch_proof_size() { + let mut test_ext = setup_test_externalities(&[300, 100]); + + test_ext.execute_with(|| { + set_current_storage_weight(1300); + + let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(300)); + + assert_ok!(CheckWeight::::post_dispatch((), &info, &post_info, 0, &Ok(()), &())); + assert_ok!(StorageWeightReclaim::::post_dispatch( + pre, + &info, + &post_info, + LEN, + &Ok(()), + &() + )); + + assert_eq!(BlockWeight::::get().total().proof_size(), 800); + }); +} + +#[test] +fn test_incorporates_check_weight_unspent_weight() { + let mut test_ext = setup_test_externalities(&[100, 300]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + + // Benchmarked storage weight: 300 + let info = DispatchInfo { weight: Weight::from_parts(100, 300), ..Default::default() }; + + // Actual weight is 50 + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(50, 250)), + pays_fee: Default::default(), + }; + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(100)); + + // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // we always need to call `post_dispatch` to verify that they interoperate correctly. 
+ assert_ok!(CheckWeight::::post_dispatch((), &info, &post_info, 0, &Ok(()), &())); + assert_ok!(StorageWeightReclaim::::post_dispatch( + pre, + &info, + &post_info, + LEN, + &Ok(()), + &() + )); + + assert_eq!(BlockWeight::::get().total().proof_size(), 900); + }) +} + +#[test] +fn test_incorporates_check_weight_unspent_weight_on_negative() { + let mut test_ext = setup_test_externalities(&[100, 300]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + // Benchmarked storage weight: 50 + let info = DispatchInfo { weight: Weight::from_parts(100, 50), ..Default::default() }; + + // Actual weight is 25 + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(50, 25)), + pays_fee: Default::default(), + }; + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(100)); + + // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // we always need to call `post_dispatch` to verify that they interoperate correctly. + assert_ok!(CheckWeight::::post_dispatch((), &info, &post_info, 0, &Ok(()), &())); + assert_ok!(StorageWeightReclaim::::post_dispatch( + pre, + &info, + &post_info, + LEN, + &Ok(()), + &() + )); + + assert_eq!(BlockWeight::::get().total().proof_size(), 1150); + }) +} + +#[test] +fn test_incorporates_check_weight_unspent_weight_reverse_order() { + let mut test_ext = setup_test_externalities(&[100, 300]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + + // Benchmarked storage weight: 300 + let info = DispatchInfo { weight: Weight::from_parts(100, 300), ..Default::default() }; + + // Actual weight is 50 + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(50, 250)), + pays_fee: Default::default(), + }; + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(100)); + + assert_ok!(StorageWeightReclaim::::post_dispatch( + pre, + &info, + &post_info, + LEN, + &Ok(()), + &() + )); + // `CheckWeight` gets called after `StorageWeightReclaim` this time. + // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // we always need to call `post_dispatch` to verify that they interoperate correctly. + assert_ok!(CheckWeight::::post_dispatch((), &info, &post_info, 0, &Ok(()), &())); + + assert_eq!(BlockWeight::::get().total().proof_size(), 900); + }) +} + +#[test] +fn test_incorporates_check_weight_unspent_weight_on_negative_reverse_order() { + let mut test_ext = setup_test_externalities(&[100, 300]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + // Benchmarked storage weight: 50 + let info = DispatchInfo { weight: Weight::from_parts(100, 50), ..Default::default() }; + + // Actual weight is 25 + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(50, 25)), + pays_fee: Default::default(), + }; + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(100)); + + assert_ok!(StorageWeightReclaim::::post_dispatch( + pre, + &info, + &post_info, + LEN, + &Ok(()), + &() + )); + // `CheckWeight` gets called after `StorageWeightReclaim` this time. + // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // we always need to call `post_dispatch` to verify that they interoperate correctly. 
+ assert_ok!(CheckWeight::::post_dispatch((), &info, &post_info, 0, &Ok(()), &())); + + assert_eq!(BlockWeight::::get().total().proof_size(), 1150); + }) +} + +#[test] +fn storage_size_reported_correctly() { + let mut test_ext = setup_test_externalities(&[1000]); + test_ext.execute_with(|| { + assert_eq!(get_proof_size(), Some(1000)); + }); + + let mut test_ext = new_test_ext(); + + let test_recorder = TestRecorder::new(&[0]); + + test_ext.register_extension(ProofSizeExt::new(test_recorder)); + + test_ext.execute_with(|| { + assert_eq!(get_proof_size(), Some(0)); + }); +} + +#[test] +fn storage_size_disabled_reported_correctly() { + let mut test_ext = setup_test_externalities(&[PROOF_RECORDING_DISABLED as usize]); + + test_ext.execute_with(|| { + assert_eq!(get_proof_size(), None); + }); +} + +#[test] +fn test_reclaim_helper() { + let mut test_ext = setup_test_externalities(&[1000, 1300, 1800]); + + test_ext.execute_with(|| { + let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 2000)); + let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); + remaining_weight_meter.consume(Weight::from_parts(0, 500)); + let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); + + assert_eq!(reclaimed, Some(Weight::from_parts(0, 200))); + + remaining_weight_meter.consume(Weight::from_parts(0, 800)); + let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); + assert_eq!(reclaimed, Some(Weight::from_parts(0, 300))); + assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(0, 1200)); + }); +} + +#[test] +fn test_reclaim_helper_does_not_reclaim_negative() { + // Benchmarked weight does not change at all + let mut test_ext = setup_test_externalities(&[1000, 1300]); + + test_ext.execute_with(|| { + let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 1000)); + let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); + let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); + + assert_eq!(reclaimed, Some(Weight::from_parts(0, 0))); + assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(0, 1000)); + }); + + // Benchmarked weight increases less than storage proof consumes + let mut test_ext = setup_test_externalities(&[1000, 1300]); + + test_ext.execute_with(|| { + let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 1000)); + let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); + remaining_weight_meter.consume(Weight::from_parts(0, 0)); + let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); + + assert_eq!(reclaimed, Some(Weight::from_parts(0, 0))); + }); +} + +/// Just here for doc purposes +fn get_benched_weight() -> Weight { + Weight::from_parts(0, 5) +} + +/// Just here for doc purposes +fn do_work() {} + +#[docify::export_content(simple_reclaimer_example)] +fn reclaim_with_weight_meter() { + let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(10, 10)); + + let benched_weight = get_benched_weight(); + + // It is important to instantiate the `StorageWeightReclaimer` before we consume the weight + // for a piece of work from the weight meter. + let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); + + if remaining_weight_meter.try_consume(benched_weight).is_ok() { + // Perform some work that takes has `benched_weight` storage weight. 
+ do_work(); + + // Reclaimer will detect that we only consumed 2 bytes, so 3 bytes are reclaimed. + let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); + + // We reclaimed 3 bytes of storage size! + assert_eq!(reclaimed, Some(Weight::from_parts(0, 3))); + assert_eq!(BlockWeight::::get().total().proof_size(), 10); + assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(10, 8)); + } +} + +#[test] +fn test_reclaim_helper_works_with_meter() { + // The node will report 12 - 10 = 2 consumed storage size between the calls. + let mut test_ext = setup_test_externalities(&[10, 12]); + + test_ext.execute_with(|| { + // Initial storage size is 10. + set_current_storage_weight(10); + reclaim_with_weight_meter(); + }); +} diff --git a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml index 028733ce23554967ee46e5ae8f2adebf6da70eb6..9dfa6ecb3513e42a4d85615a3c844ab98e451ad9 100644 --- a/cumulus/test/client/Cargo.toml +++ b/cumulus/test/client/Cargo.toml @@ -46,9 +46,11 @@ cumulus-primitives-storage-weight-reclaim = { path = "../../primitives/storage-w [features] runtime-benchmarks = [ "cumulus-primitives-core/runtime-benchmarks", + "cumulus-primitives-storage-weight-reclaim/runtime-benchmarks", "cumulus-test-service/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", "sc-service/runtime-benchmarks", diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index c46f4da7f6788cf53dfdd6d7be5c221d81562e70..9239ff0f23eeb062d1eaefbe2341720e38178f07 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -19,7 +19,7 @@ mod block_builder; use codec::{Decode, Encode}; use runtime::{ - Balance, Block, BlockHashCount, Runtime, RuntimeCall, Signature, SignedExtra, SignedPayload, + Balance, Block, BlockHashCount, Runtime, RuntimeCall, Signature, SignedPayload, TxExtension, UncheckedExtrinsic, VERSION, }; use sc_executor::HeapAllocStrategy; @@ -125,7 +125,7 @@ impl DefaultTestClientBuilderExt for TestClientBuilder { /// Create an unsigned extrinsic from a runtime call. 
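As a quick aside on the `new_unsigned` -> `new_bare` rename applied in the function below: a "bare" extrinsic carries neither a signature nor transaction extension data, which the new `Preamble::Bare` variant expresses explicitly. A hypothetical sketch, reusing only types and calls that already appear in this diff:

```rust
// Sketch only: assumes the same dependencies as the surrounding test client
// (`cumulus-test-runtime`, `frame-system`).
use cumulus_test_runtime::{RuntimeCall, UncheckedExtrinsic};

/// Build a bare (formerly "unsigned") extrinsic for an illustrative call.
fn bare_set_heap_pages() -> UncheckedExtrinsic {
    let call = RuntimeCall::System(frame_system::Call::set_heap_pages { pages: 0u64 });
    // Previously `UncheckedExtrinsic::new_unsigned(call)`.
    UncheckedExtrinsic::new_bare(call)
}
```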
pub fn generate_unsigned(function: impl Into) -> UncheckedExtrinsic { - UncheckedExtrinsic::new_unsigned(function.into()) + UncheckedExtrinsic::new_bare(function.into()) } /// Create a signed extrinsic from a runtime call and sign @@ -143,7 +143,7 @@ pub fn generate_extrinsic_with_pair( let period = BlockHashCount::get().checked_next_power_of_two().map(|c| c / 2).unwrap_or(2) as u64; let tip = 0; - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckGenesis::::new(), @@ -152,13 +152,14 @@ pub fn generate_extrinsic_with_pair( frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::::new(), - ); + ) + .into(); let function = function.into(); let raw_payload = SignedPayload::from_raw( function.clone(), - extra.clone(), + tx_ext.clone(), ((), VERSION.spec_version, genesis_block, current_block_hash, (), (), (), ()), ); let signature = raw_payload.using_encoded(|e| origin.sign(e)); @@ -167,7 +168,7 @@ pub fn generate_extrinsic_with_pair( function, origin.public().into(), Signature::Sr25519(signature), - extra, + tx_ext, ) } diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 5fb3141098449ffa26f7e7ca09f30adee610f9e4..154948a24b48111c882fa76008007c7e7fdb861a 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -246,6 +246,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = (); type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = pallet_transaction_payment::weights::SubstrateWeight; } impl pallet_sudo::Config for Runtime { @@ -322,8 +323,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckGenesis, @@ -335,7 +336,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, @@ -346,7 +347,7 @@ pub type Executive = frame_executive::Executive< TestOnRuntimeUpgrade, >; /// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; +pub type SignedPayload = generic::SignedPayload; pub struct TestOnRuntimeUpgrade; @@ -377,7 +378,7 @@ impl_runtime_apis! 
{ Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index 27273f4e0a8d35cfa1a21f45497c61a9b417d718..39e8aeba433ab52f2bfb151ca78a73e8d30c4185 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -104,10 +104,12 @@ substrate-test-utils = { path = "../../../substrate/test-utils" } runtime-benchmarks = [ "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-primitives-core/runtime-benchmarks", + "cumulus-primitives-storage-weight-reclaim/runtime-benchmarks", "cumulus-test-client/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-im-online/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "parachains-common/runtime-benchmarks", "polkadot-cli/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", diff --git a/cumulus/test/service/src/bench_utils.rs b/cumulus/test/service/src/bench_utils.rs index 4ace894b392aa2393741572249ed35f0a7101845..15128b23787987f22d2da6f409e2a7d6702c0095 100644 --- a/cumulus/test/service/src/bench_utils.rs +++ b/cumulus/test/service/src/bench_utils.rs @@ -69,7 +69,7 @@ pub fn extrinsic_set_time(client: &TestClient) -> OpaqueExtrinsic { let timestamp = best_number as u64 * cumulus_test_runtime::MinimumPeriod::get(); cumulus_test_runtime::UncheckedExtrinsic { - signature: None, + preamble: sp_runtime::generic::Preamble::Bare, function: cumulus_test_runtime::RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: timestamp, }), @@ -102,7 +102,7 @@ pub fn extrinsic_set_validation_data( }; cumulus_test_runtime::UncheckedExtrinsic { - signature: None, + preamble: sp_runtime::generic::Preamble::Bare, function: cumulus_test_runtime::RuntimeCall::ParachainSystem( cumulus_pallet_parachain_system::Call::set_validation_data { data }, ), diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 3554a383f219e7465f5df875bdf6463b93119f3b..f05dc5f180f5a9ec526117fd980042ca7420e949 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -883,7 +883,7 @@ pub fn construct_extrinsic( .map(|c| c / 2) .unwrap_or(2) as u64; let tip = 0; - let extra: runtime::SignedExtra = ( + let tx_ext: runtime::TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckGenesis::::new(), @@ -895,10 +895,11 @@ pub fn construct_extrinsic( frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::::new(), - ); + ) + .into(); let raw_payload = runtime::SignedPayload::from_raw( function.clone(), - extra.clone(), + tx_ext.clone(), ((), runtime::VERSION.spec_version, genesis_block, current_block_hash, (), (), (), ()), ); let signature = raw_payload.using_encoded(|e| caller.sign(e)); @@ -906,7 +907,7 @@ pub fn construct_extrinsic( function, caller.public().into(), runtime::Signature::Sr25519(signature), - extra, + tx_ext, ) } diff --git a/cumulus/zombienet/tests/0002-pov_recovery.toml b/cumulus/zombienet/tests/0002-pov_recovery.toml index fe42fd4b2f6681154e89d5e7274618a8f307dfab..15a61eba2a0342bce55f805473d2c3ba9b51c7f8 100644 --- a/cumulus/zombienet/tests/0002-pov_recovery.toml +++ b/cumulus/zombienet/tests/0002-pov_recovery.toml @@ -4,7 +4,7 @@ 
default_command = "polkadot" chain = "rococo-local" -[relaychain.genesis.runtimeGenesis.patch.configuration.config] +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] # set parameters such that collators only connect to 1 validator as a backing group max_validators_per_core = 1 group_rotation_frequency = 100 # 10 mins diff --git a/docs/RELEASE.md b/docs/RELEASE.md index 6d681d78f367abff313c63adb1559ea6aa06dbc7..e73be2779a99426203e209da846f938c0f73cceb 100644 --- a/docs/RELEASE.md +++ b/docs/RELEASE.md @@ -18,10 +18,16 @@ Rococo. To easily refer to a release, it shall be named by its date in the form ## Crate -We try to follow [SemVer 2.0.0](https://semver.org/) as best as possible for versioning our crates. SemVer requires a -piece of software to first declare a public API. The public API of the Polkadot SDK is hereby declared as the sum of all -crates' public APIs. +We try to follow [SemVer 2.0.0](https://semver.org/) as best as possible for versioning our crates. The definitions of +`major`, `minor` and `patch` version for Rust crates are slightly altered from their standard for pre `1.0.0` versions. +Quoting [rust-lang.org](https://doc.rust-lang.org/cargo/reference/semver.html): +>Initial development releases starting with “0.y.z” can treat changes in “y” as a major release, and “z” as a minor +release. “0.0.z” releases are always major changes. This is because Cargo uses the convention that only changes in the +left-most non-zero component are considered incompatible. + +SemVer requires a piece of software to first declare a public API. The public API of the Polkadot SDK +is hereby declared as the sum of all crates' public APIs. Inductively, the public API of our library crates is declared as all public items that are neither: - Inside a `__private` module diff --git a/docs/mermaid/IA.mmd b/docs/mermaid/IA.mmd index 93d3e92814cf1307fa7c0ea9f290ed21edae58fb..4eb50bcf96a8932de3fa90748fcfeb3ca7f02a5f 100644 --- a/docs/mermaid/IA.mmd +++ b/docs/mermaid/IA.mmd @@ -3,7 +3,7 @@ flowchart devhub --> polkadot_sdk devhub --> reference_docs - devhub --> tutorial + devhub --> guides polkadot_sdk --> substrate polkadot_sdk --> frame diff --git a/docs/mermaid/outer_runtime_types.mmd b/docs/mermaid/outer_runtime_types.mmd new file mode 100644 index 0000000000000000000000000000000000000000..c909df16af1ffbb697e3d363634fa218005a9c32 --- /dev/null +++ b/docs/mermaid/outer_runtime_types.mmd @@ -0,0 +1,3 @@ +flowchart LR + RuntimeCall --"TryInto"--> PalletCall + PalletCall --"Into"--> RuntimeCall diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index 05aced8751aee8286c78ca4f5baeb398a0ac83f6..735b3d7df61d1fb033a5637010151e15ee5f368d 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -17,9 +17,13 @@ workspace = true # Needed for all FRAME-based code parity-scale-codec = { version = "3.0.0", default-features = false } scale-info = { version = "2.6.0", default-features = false } -frame = { path = "../../substrate/frame", features = ["experimental", "runtime"] } +frame = { path = "../../substrate/frame", features = [ + "experimental", + "runtime", +] } pallet-examples = { path = "../../substrate/frame/examples" } pallet-default-config-example = { path = "../../substrate/frame/examples/default-config" } +pallet-example-offchain-worker = { path = "../../substrate/frame/examples/offchain-worker" } # How we build docs in rust-docs simple-mermaid = "0.1.1" @@ -30,8 +34,12 @@ node-cli = { package = "staging-node-cli", path = "../../substrate/bin/node/cli" 
kitchensink-runtime = { path = "../../substrate/bin/node/runtime" } chain-spec-builder = { package = "staging-chain-spec-builder", path = "../../substrate/bin/utils/chain-spec-builder" } subkey = { path = "../../substrate/bin/utils/subkey" } +frame-system = { path = "../../substrate/frame/system", default-features = false } +frame-support = { path = "../../substrate/frame/support", default-features = false } +frame-executive = { path = "../../substrate/frame/executive", default-features = false } +pallet-example-single-block-migrations = { path = "../../substrate/frame/examples/single-block-migrations" } -# Substrate +# Substrate Client sc-network = { path = "../../substrate/client/network" } sc-rpc-api = { path = "../../substrate/client/rpc-api" } sc-rpc = { path = "../../substrate/client/rpc" } @@ -43,6 +51,7 @@ sc-consensus-grandpa = { path = "../../substrate/client/consensus/grandpa" } sc-consensus-beefy = { path = "../../substrate/client/consensus/beefy" } sc-consensus-manual-seal = { path = "../../substrate/client/consensus/manual-seal" } sc-consensus-pow = { path = "../../substrate/client/consensus/pow" } + substrate-wasm-builder = { path = "../../substrate/utils/wasm-builder" } # Cumulus @@ -52,7 +61,18 @@ cumulus-pallet-parachain-system = { path = "../../cumulus/pallets/parachain-syst ] } parachain-info = { package = "staging-parachain-info", path = "../../cumulus/parachains/pallets/parachain-info" } pallet-aura = { path = "../../substrate/frame/aura", default-features = false } + +# Pallets and FRAME internals pallet-timestamp = { path = "../../substrate/frame/timestamp" } +pallet-balances = { path = "../../substrate/frame/balances" } +pallet-transaction-payment = { path = "../../substrate/frame/transaction-payment" } +pallet-utility = { path = "../../substrate/frame/utility" } +pallet-multisig = { path = "../../substrate/frame/multisig" } +pallet-proxy = { path = "../../substrate/frame/proxy" } +pallet-authorship = { path = "../../substrate/frame/authorship" } +pallet-collective = { path = "../../substrate/frame/collective" } +pallet-democracy = { path = "../../substrate/frame/democracy" } +pallet-scheduler = { path = "../../substrate/frame/scheduler" } # Primitives sp-io = { path = "../../substrate/primitives/io" } @@ -60,13 +80,11 @@ sp-api = { path = "../../substrate/primitives/api" } sp-core = { path = "../../substrate/primitives/core" } sp-keyring = { path = "../../substrate/primitives/keyring" } sp-runtime = { path = "../../substrate/primitives/runtime" } +sp-offchain = { path = "../../substrate/primitives/offchain" } +sp-version = { path = "../../substrate/primitives/version" } # XCM xcm = { package = "staging-xcm", path = "../../polkadot/xcm" } -[dev-dependencies] -parity-scale-codec = "3.6.5" -scale-info = "2.9.0" - [features] experimental = ["pallet-aura/experimental"] diff --git a/docs/sdk/headers/header.html b/docs/sdk/headers/header.html new file mode 100644 index 0000000000000000000000000000000000000000..e28458c4ccc791d9a72613ffb530b685828ea828 --- /dev/null +++ b/docs/sdk/headers/header.html @@ -0,0 +1,144 @@ + + + diff --git a/docs/sdk/headers/theme.css b/docs/sdk/headers/theme.css new file mode 100644 index 0000000000000000000000000000000000000000..a488e15c36b70ee76b14f429434daf0717bc0320 --- /dev/null +++ b/docs/sdk/headers/theme.css @@ -0,0 +1,17 @@ +:root { + --polkadot-pink: #E6007A; + --polkadot-green: #56F39A; + --polkadot-lime: #D3FF33; + --polkadot-cyan: #00B2FF; + --polkadot-purple: #552BBF; +} + +body.sdk-docs { + nav.sidebar>div.sidebar-crate>a>img { 
+ width: 190px; + } + + nav.sidebar { + flex: 0 0 250px; + } +} diff --git a/docs/sdk/headers/toc.html b/docs/sdk/headers/toc.html deleted file mode 100644 index a4a074cb4f3153cb135da8608462d4ecf59144cb..0000000000000000000000000000000000000000 --- a/docs/sdk/headers/toc.html +++ /dev/null @@ -1,54 +0,0 @@ - - diff --git a/docs/sdk/src/guides/your_first_pallet/mod.rs b/docs/sdk/src/guides/your_first_pallet/mod.rs index 29cdda36ed15ba3fcf964494732fc8364defff45..6a26292482477a712f34b787b5c82b6cebab9947 100644 --- a/docs/sdk/src/guides/your_first_pallet/mod.rs +++ b/docs/sdk/src/guides/your_first_pallet/mod.rs @@ -128,8 +128,8 @@ //! //! Recall that within our pallet, (almost) all blocks of code are generic over ``. And, //! because `trait Config: frame_system::Config`, we can get access to all items in `Config` (or -//! `frame_system::Config`) using `T::NameOfItem`. This is all within the boundaries of how Rust -//! traits and generics work. If unfamiliar with this pattern, read +//! `frame_system::Config`) using `T::NameOfItem`. This is all within the boundaries of how +//! Rust traits and generics work. If unfamiliar with this pattern, read //! [`crate::reference_docs::trait_based_programming`] before going further. //! //! Crucially, a typical FRAME runtime contains a `struct Runtime`. The main role of this `struct` @@ -250,14 +250,16 @@ // of event is probably not the best. //! //! With the explanation out of the way, let's see how these components can be added. Both follow a -//! fairly familiar syntax: normal Rust enums, with an extra `#[frame::event/error]` attribute -//! attached. +//! fairly familiar syntax: normal Rust enums, with extra +//! [`#[frame::event]`](frame::pallet_macros::event) and +//! [`#[frame::error]`](frame::pallet_macros::error) attributes attached. #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Event)] #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Error)] //! -//! One slightly custom part of this is the `#[pallet::generate_deposit(pub(super) fn -//! deposit_event)]` part. Without going into too much detail, in order for a pallet to emit events -//! to the rest of the system, it needs to do two things: +//! One slightly custom part of this is the [`#[pallet::generate_deposit(pub(super) fn +//! deposit_event)]`](frame::pallet_macros::generate_deposit) part. Without going into too +//! much detail, in order for a pallet to emit events to the rest of the system, it needs to do two +//! things: //! //! 1. Declare a type in its `Config` that refers to the overarching event type of the runtime. In //! short, by doing this, the pallet is expressing an important bound: `type RuntimeEvent: @@ -266,11 +268,12 @@ //! store it where needed. //! //! 2. But, doing this conversion and storing is too much to expect each pallet to define. FRAME -//! provides a default way of storing events, and this is what `pallet::generate_deposit` is doing. +//! provides a default way of storing events, and this is what +//! [`pallet::generate_deposit`](frame::pallet_macros::generate_deposit) is doing. #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", config_v2)] //! //! > These `Runtime*` types are better explained in -//! > [`crate::reference_docs::frame_composite_enums`]. +//! > [`crate::reference_docs::frame_runtime_types`]. //! //! 
Then, we can rewrite the `transfer` dispatchable as such: #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", transfer_v2)] @@ -280,12 +283,12 @@ #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", runtime_v2)] //! //! In this snippet, the actual `RuntimeEvent` type (right hand side of `type RuntimeEvent = -//! RuntimeEvent`) is generated by `construct_runtime`. An interesting way to inspect this type is -//! to see its definition in rust-docs: +//! RuntimeEvent`) is generated by +//! [`construct_runtime`](frame::runtime::prelude::construct_runtime). An interesting way to inspect +//! this type is to see its definition in rust-docs: //! [`crate::guides::your_first_pallet::pallet_v2::tests::runtime_v2::RuntimeEvent`]. //! //! -//! //! ## What Next? //! //! The following topics where used in this guide, but not covered in depth. It is suggested to @@ -293,7 +296,7 @@ //! //! - [`crate::reference_docs::safe_defensive_programming`]. //! - [`crate::reference_docs::frame_origin`]. -//! - [`crate::reference_docs::frame_composite_enums`]. +//! - [`crate::reference_docs::frame_runtime_types`]. //! - The pallet we wrote in this guide was using `dev_mode`, learn more in //! [`frame::pallet_macros::config`]. //! - Learn more about the individual pallet items/macros, such as event and errors and call, in diff --git a/docs/sdk/src/lib.rs b/docs/sdk/src/lib.rs index 075d9ddaffe5bd95bafa4ca1d06667c253d4a21b..e211476d2514419a3d0889ef426817342155c178 100644 --- a/docs/sdk/src/lib.rs +++ b/docs/sdk/src/lib.rs @@ -15,7 +15,7 @@ //! - Start by learning about the the [`polkadot_sdk`], its structure and context. //! - Then, head over the [`guides`]. This modules contains in-depth guides about the most important //! user-journeys of the Polkadot SDK. -//! - Whilst reading the guides, you might find back-links to [`crate::reference_docs`]. +//! - Whilst reading the guides, you might find back-links to [`reference_docs`]. //! - Finally, is the parent website of this crate that contains the //! list of further tools related to the Polkadot SDK. //! @@ -25,6 +25,11 @@ #![doc = simple_mermaid::mermaid!("../../mermaid/IA.mmd")] #![warn(rustdoc::broken_intra_doc_links)] #![warn(rustdoc::private_intra_doc_links)] +#![doc(html_favicon_url = "https://polkadot.network/favicon-32x32.png")] +#![doc( + html_logo_url = "https://europe1.discourse-cdn.com/standard21/uploads/polkadot2/original/1X/eb57081e2bb7c39e5fcb1a98b443e423fa4448ae.svg" +)] +#![doc(issue_tracker_base_url = "https://github.com/paritytech/polkadot-sdk/issues")] /// Meta information about this crate, how it is built, what principles dictates its evolution and /// how one can contribute to it. diff --git a/docs/sdk/src/meta_contributing.rs b/docs/sdk/src/meta_contributing.rs index 7ecf8b0adfd3a2038bd32b6d6b830c71782cd1b2..fcdcea9934bb6b8cf7ee6ec090ebd0939888238c 100644 --- a/docs/sdk/src/meta_contributing.rs +++ b/docs/sdk/src/meta_contributing.rs @@ -101,7 +101,7 @@ //! * Before even getting started, what is with all of this ``? We link to //! [`crate::reference_docs::trait_based_programming`]. //! * First, the name. Why is this called `pallet::call`? This goes back to `enum Call`, which is -//! explained in [`crate::reference_docs::frame_composite_enums`]. Build on top of this! +//! explained in [`crate::reference_docs::frame_runtime_types`]. Build on top of this! //! * Then, what is `origin`? Just an account id? [`crate::reference_docs::frame_origin`]. //! * Then, what is `DispatchResult`? Why is this called *dispatch*? 
Probably something that can be //! explained in the documentation of [`frame::prelude::DispatchResult`]. @@ -138,7 +138,9 @@ //! injected, run: //! //! ```sh -//! SKIP_WASM_BUILD=1 RUSTDOCFLAGS="--html-in-header $(pwd)/docs/sdk/headers/toc.html" cargo doc -p polkadot-sdk-docs --no-deps --open +//! SKIP_WASM_BUILD=1 \ +//! RUSTDOCFLAGS="--html-in-header $(pwd)/docs/sdk/headers/header.html --extend-css $(pwd)/docs/sdk/headers/theme.css --default-theme=ayu" \ +//! cargo doc -p polkadot-sdk-docs --no-deps --open //! ``` //! //! If even faster build time for docs is needed, you can temporarily remove most of the diff --git a/docs/sdk/src/reference_docs/development_environment_advice.rs b/docs/sdk/src/reference_docs/development_environment_advice.rs index 4317695979367b32bfb673d2d368399c5ca35faa..21bbe78836c44b8afd70cab68e4b9b2f929fb4a0 100644 --- a/docs/sdk/src/reference_docs/development_environment_advice.rs +++ b/docs/sdk/src/reference_docs/development_environment_advice.rs @@ -111,3 +111,74 @@ //! If you have a powerful remote server available, you may consider using //! [cargo-remote](https://github.com/sgeisler/cargo-remote) to execute cargo commands on it, //! freeing up local resources for other tasks like `rust-analyzer`. +//! +//! When using `cargo-remote`, you can configure your editor to perform the the typical +//! "check-on-save" remotely as well. The configuration for VSCode is as follows: +//! +//! ```json +//! { +//! "rust-analyzer.cargo.buildScripts.overrideCommand": [ +//! "cargo", +//! "remote", +//! "--build-env", +//! "SKIP_WASM_BUILD=1", +//! "--", +//! "check", +//! "--message-format=json", +//! "--all-targets", +//! "--all-features", +//! "--target-dir=target/rust-analyzer" +//! ], +//! "rust-analyzer.check.overrideCommand": [ +//! "cargo", +//! "remote", +//! "--build-env", +//! "SKIP_WASM_BUILD=1", +//! "--", +//! "check", +//! "--workspace", +//! "--message-format=json", +//! "--all-targets", +//! "--all-features", +//! "--target-dir=target/rust-analyzer" +//! ], +//! } +//! ``` +//! +//! //! and the same in Lua for `neovim/nvim-lspconfig`: +//! +//! ```lua +//! ["rust-analyzer"] = { +//! cargo = { +//! buildScripts = { +//! overrideCommand = { +//! "cargo", +//! "remote", +//! "--build-env", +//! "SKIP_WASM_BUILD=1", +//! "--", +//! "check", +//! "--message-format=json", +//! "--all-targets", +//! "--all-features", +//! "--target-dir=target/rust-analyzer" +//! }, +//! }, +//! check = { +//! overrideCommand = { +//! "cargo", +//! "remote", +//! "--build-env", +//! "SKIP_WASM_BUILD=1", +//! "--", +//! "check", +//! "--workspace", +//! "--message-format=json", +//! "--all-targets", +//! "--all-features", +//! "--target-dir=target/rust-analyzer" +//! }, +//! }, +//! }, +//! }, +//! ``` diff --git a/docs/sdk/src/reference_docs/extrinsic_encoding.rs b/docs/sdk/src/reference_docs/extrinsic_encoding.rs index 9008f8f835f5d6cd1705c9dd4e4e4e78fd3aa2d9..be1af50003849f6500fe39d8bf2ad83c7b7dfbe2 100644 --- a/docs/sdk/src/reference_docs/extrinsic_encoding.rs +++ b/docs/sdk/src/reference_docs/extrinsic_encoding.rs @@ -56,7 +56,7 @@ //! version_and_signed, //! from_address, //! signature, -//! signed_extensions_extra, +//! transaction_extensions_extra, //! ) //! ``` //! @@ -90,31 +90,31 @@ //! The signature type used on the Polkadot relay chain is [`sp_runtime::MultiSignature`]; the //! variants there are the types of signature that can be provided. //! -//! ### signed_extensions_extra +//! ### transaction_extensions_extra //! //! 
This is the concatenation of the [SCALE encoded][frame::deps::codec] bytes representing each of -//! the [_signed extensions_][sp_runtime::traits::SignedExtension], and are configured by the -//! fourth generic parameter of [`sp_runtime::generic::UncheckedExtrinsic`]. Learn more about -//! signed extensions [here][crate::reference_docs::signed_extensions]. +//! the [_transaction extensions_][sp_runtime::traits::TransactionExtension], and are configured by +//! the fourth generic parameter of [`sp_runtime::generic::UncheckedExtrinsic`]. Learn more about +//! transaction extensions [here][crate::reference_docs::transaction_extensions]. //! -//! When it comes to constructing an extrinsic, each signed extension has two things that we are -//! interested in here: +//! When it comes to constructing an extrinsic, each transaction extension has two things that we +//! are interested in here: //! -//! - The actual SCALE encoding of the signed extension type itself; this is what will form our -//! `signed_extensions_extra` bytes. -//! - An `AdditionalSigned` type. This is SCALE encoded into the `signed_extensions_additional` data -//! of the _signed payload_ (see below). +//! - The actual SCALE encoding of the transaction extension type itself; this is what will form our +//! `transaction_extensions_extra` bytes. +//! - An `Implicit` type. This is SCALE encoded into the `transaction_extensions_implicit` data of +//! the _signed payload_ (see below). //! //! Either (or both) of these can encode to zero bytes. //! -//! Each chain configures the set of signed extensions that it uses in its runtime configuration. -//! At the time of writing, Polkadot configures them +//! Each chain configures the set of transaction extensions that it uses in its runtime +//! configuration. At the time of writing, Polkadot configures them //! [here](https://github.com/polkadot-fellows/runtimes/blob/1dc04eb954eadf8aadb5d83990b89662dbb5a074/relay/polkadot/src/lib.rs#L1432C25-L1432C25). -//! Some of the common signed extensions are defined -//! [here][frame::deps::frame_system#signed-extensions]. +//! Some of the common transaction extensions are defined +//! [here][frame::deps::frame_system#transaction-extensions]. //! -//! Information about exactly which signed extensions are present on a chain and in what order is -//! also a part of the metadata for the chain. For V15 metadata, it can be +//! Information about exactly which transaction extensions are present on a chain and in what order +//! is also a part of the metadata for the chain. For V15 metadata, it can be //! [found here][frame::deps::frame_support::__private::metadata::v15::ExtrinsicMetadata]. //! //! ## call_data @@ -127,7 +127,7 @@ //! runtimes, a call is represented as an enum of enums, where the outer enum represents the FRAME //! pallet being called, and the inner enum represents the call being made within that pallet, and //! any arguments to it. Read more about the call enum -//! [here][crate::reference_docs::frame_composite_enums]. +//! [here][crate::reference_docs::frame_runtime_types]. //! //! FRAME `Call` enums are automatically generated, and end up looking something like this: #![doc = docify::embed!("./src/reference_docs/extrinsic_encoding.rs", call_data)] @@ -163,8 +163,8 @@ //! ```text //! signed_payload = concat( //! call_data, -//! signed_extensions_extra, -//! signed_extensions_additional, +//! transaction_extensions_extra, +//! transaction_extensions_implicit, //! ) //! //! if length(signed_payload) > 256 { @@ -172,16 +172,16 @@ //! } //! 
``` //! -//! The bytes representing `call_data` and `signed_extensions_extra` can be obtained as described -//! above. `signed_extensions_additional` is constructed by SCALE encoding the -//! ["additional signed" data][sp_runtime::traits::SignedExtension::AdditionalSigned] for each -//! signed extension that the chain is using, in order. +//! The bytes representing `call_data` and `transaction_extensions_extra` can be obtained as +//! descibed above. `transaction_extensions_implicit` is constructed by SCALE encoding the +//! ["implicit" data][sp_runtime::traits::TransactionExtensionBase::Implicit] for each +//! transaction extension that the chain is using, in order. //! //! Once we've concatenated those together, we hash the result if it's greater than 256 bytes in //! length using a Blake2 256bit hasher. //! //! The [`sp_runtime::generic::SignedPayload`] type takes care of assembling the correct payload -//! for us, given `call_data` and a tuple of signed extensions. +//! for us, given `call_data` and a tuple of transaction extensions. //! //! # Example Encoding //! @@ -192,11 +192,12 @@ #[docify::export] pub mod call_data { use parity_scale_codec::{Decode, Encode}; + use sp_runtime::{traits::Dispatchable, DispatchResultWithInfo}; // The outer enum composes calls within // different pallets together. We have two // pallets, "PalletA" and "PalletB". - #[derive(Encode, Decode)] + #[derive(Encode, Decode, Clone)] pub enum Call { #[codec(index = 0)] PalletA(PalletACall), @@ -207,23 +208,33 @@ pub mod call_data { // An inner enum represents the calls within // a specific pallet. "PalletA" has one call, // "Foo". - #[derive(Encode, Decode)] + #[derive(Encode, Decode, Clone)] pub enum PalletACall { #[codec(index = 0)] Foo(String), } - #[derive(Encode, Decode)] + #[derive(Encode, Decode, Clone)] pub enum PalletBCall { #[codec(index = 0)] Bar(String), } + + impl Dispatchable for Call { + type RuntimeOrigin = (); + type Config = (); + type Info = (); + type PostInfo = (); + fn dispatch(self, _origin: Self::RuntimeOrigin) -> DispatchResultWithInfo { + Ok(()) + } + } } #[docify::export] pub mod encoding_example { use super::call_data::{Call, PalletACall}; - use crate::reference_docs::signed_extensions::signed_extensions_example; + use crate::reference_docs::transaction_extensions::transaction_extensions_example; use parity_scale_codec::Encode; use sp_core::crypto::AccountId32; use sp_keyring::sr25519::Keyring; @@ -232,34 +243,40 @@ pub mod encoding_example { MultiAddress, MultiSignature, }; - // Define some signed extensions to use. We'll use a couple of examples - // from the signed extensions reference doc. - type SignedExtensions = - (signed_extensions_example::AddToPayload, signed_extensions_example::AddToSignaturePayload); + // Define some transaction extensions to use. We'll use a couple of examples + // from the transaction extensions reference doc. + type TransactionExtensions = ( + transaction_extensions_example::AddToPayload, + transaction_extensions_example::AddToSignaturePayload, + ); // We'll use `UncheckedExtrinsic` to encode our extrinsic for us. We set // the address and signature type to those used on Polkadot, use our custom - // `Call` type, and use our custom set of `SignedExtensions`. - type Extrinsic = - UncheckedExtrinsic, Call, MultiSignature, SignedExtensions>; + // `Call` type, and use our custom set of `TransactionExtensions`. 
+ type Extrinsic = UncheckedExtrinsic< + MultiAddress, + Call, + MultiSignature, + TransactionExtensions, + >; pub fn encode_demo_extrinsic() -> Vec { // The "from" address will be our Alice dev account. let from_address = MultiAddress::::Id(Keyring::Alice.to_account_id()); - // We provide some values for our expected signed extensions. - let signed_extensions = ( - signed_extensions_example::AddToPayload(1), - signed_extensions_example::AddToSignaturePayload, + // We provide some values for our expected transaction extensions. + let transaction_extensions = ( + transaction_extensions_example::AddToPayload(1), + transaction_extensions_example::AddToSignaturePayload, ); // Construct our call data: let call_data = Call::PalletA(PalletACall::Foo("Hello".to_string())); // The signed payload. This takes care of encoding the call_data, - // signed_extensions_extra and signed_extensions_additional, and hashing + // transaction_extensions_extra and transaction_extensions_implicit, and hashing // the result if it's > 256 bytes: - let signed_payload = SignedPayload::new(&call_data, signed_extensions.clone()); + let signed_payload = SignedPayload::new(call_data.clone(), transaction_extensions.clone()); // Sign the signed payload with our Alice dev account's private key, // and wrap the signature into the expected type: @@ -269,7 +286,7 @@ pub mod encoding_example { }; // Now, we can build and encode our extrinsic: - let ext = Extrinsic::new_signed(call_data, from_address, signature, signed_extensions); + let ext = Extrinsic::new_signed(call_data, from_address, signature, transaction_extensions); let encoded_ext = ext.encode(); encoded_ext diff --git a/docs/sdk/src/reference_docs/frame_composite_enums.rs b/docs/sdk/src/reference_docs/frame_composite_enums.rs deleted file mode 100644 index 6051cd534467672b9831187ef5c8b814712f7d18..0000000000000000000000000000000000000000 --- a/docs/sdk/src/reference_docs/frame_composite_enums.rs +++ /dev/null @@ -1 +0,0 @@ -//! # FRAME Composite Enums diff --git a/docs/sdk/src/reference_docs/frame_offchain_workers.rs b/docs/sdk/src/reference_docs/frame_offchain_workers.rs new file mode 100644 index 0000000000000000000000000000000000000000..7999707e5ee018c4bb7634e7a506ff8fee8fa8ac --- /dev/null +++ b/docs/sdk/src/reference_docs/frame_offchain_workers.rs @@ -0,0 +1,115 @@ +//! # Offchain Workers +//! +//! This reference document explains how offchain workers work in Substrate and FRAME. The main +//! focus is upon FRAME's implementation of this functionality. Nonetheless, offchain workers are a +//! Substrate-provided feature and can be used with possible alternatives to [`frame`] as well. +//! +//! Offchain workers are a commonly misunderstood topic, therefore we explain them bottom-up, +//! starting at the fundamentals and then describing the developer interface. +//! +//! ## Context +//! +//! Recall from [`crate::reference_docs::wasm_meta_protocol`] that the node and the runtime +//! communicate with one another via host functions and runtime APIs. Many of these interactions +//! contribute to the actual state transition of the blockchain. For example [`sp_api::Core`] is the +//! main runtime API that is called to execute new blocks. +//! +//! Offchain workers are in principle not different in any way: It is a runtime API exposed by the +//! wasm blob ([`sp_offchain::OffchainWorkerApi`]), and the node software calls into it when it +//! deems fit. But, crucially, this API call is different in that: +//! +//! 1. It can have no impact on the state ie. 
it is _OFF (the) CHAIN_. If any state is altered +//! during the execution of this API call, it is discarded. +//! 2. It has access to an extended set of host functions that allow the wasm blob to do more. For +//! example, call into HTTP requests. +//! +//! > The main way through which an offchain worker can interact with the state is by submitting an +//! > extrinsic to the chain. This is the ONLY way to alter the state from an offchain worker. +//! > [`pallet_example_offchain_worker`] provides an example of this. +//! +//! +//! Given the "Off Chain" nature of this API, it is important to remember that calling this API is +//! entirely optional. Some nodes might call into it, some might not, and it would have no impact on +//! the execution of your blockchain because no state is altered no matter the execution of the +//! offchain worker API. +//! +//! Substrate's CLI allows some degree of configuration about this, allowing node operators to +//! specify when they want to run the offchain worker API. See +//! [`sc_cli::RunCmd::offchain_worker_params`]. +//! +//! ## Nondeterministic Execution +//! +//! Needless to say, given the above description, the code in your offchain worker API can be +//! nondeterministic, as it is not part of the blockchain's STF, so it can be executed at unknown +//! times, by unknown nodes, and has no impact on the state. This is why an HTTP +//! ([`sp_runtime::offchain::http`]) API is readily provided to the offchain worker APIs. Because +//! there is no need for determinism in this context. +//! +//! > A common mistake here is for novice developers to see this HTTP API, and imagine that +//! > `polkadot-sdk` somehow magically solved the determinism in blockchains, and now a blockchain +//! > can make HTTP calls and it will all work. This is absolutely NOT the case. An HTTP call made +//! > by the offchain worker is non-deterministic by design. Blockchains can't and always won't be +//! > able to perform non-deterministic operations such as making HTTP calls to a foreign server. +//! +//! ## FRAME's API +//! +//! [`frame`] provides a simple API through which pallets can define offchain worker functions. This +//! is part of [`frame::traits::Hooks`], which is implemented as a part of +//! [`frame::pallet_macros::hooks`]. +//! +//! ``` +//! +//! #[frame::pallet] +//! pub mod pallet { +//! use frame::prelude::*; +//! +//! #[pallet::config] +//! pub trait Config: frame_system::Config {} +//! +//! #[pallet::pallet] +//! pub struct Pallet(_); +//! +//! #[pallet::hooks] +//! impl Hooks> for Pallet { +//! fn offchain_worker(block_number: BlockNumberFor) { +//! // ... +//! } +//! } +//! } +//! ``` +//! +//! Additionally, [`sp_runtime::offchain`] provides a set of utilities that can be used to moderate +//! the execution of offchain workers. +//! +//! ## Think Twice: Why Use Substrate's Offchain Workers? +//! +//! Consider the fact that in principle, an offchain worker code written using the above API is no +//! different than an equivalent written with an _actual offchain interaction library_, such as +//! [Polkadot-JS](https://polkadot.js.org/docs/), or any of the other ones listed [here](https://github.com/substrate-developer-hub/awesome-substrate?tab=readme-ov-file#client-libraries). +//! +//! They can both read from the state, and have no means of updating the state, other than the route +//! of submitting an extrinsic to the chain. Therefore, it is worth thinking twice before embedding +//! a logic as a part of Substrate's offchain worker API. 
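+//!
+//! To make that comparison concrete, the body of an embedded offchain worker usually boils down
+//! to something like the following sketch (the storage item, the HTTP endpoint and the extrinsic
+//! that would be submitted back are all hypothetical):
+//!
+//! ```ignore
+//! fn offchain_worker(_now: BlockNumberFor<T>) {
+//!     use sp_runtime::offchain::http;
+//!     // Reading state is easy, as the worker runs inside the runtime.
+//!     let _current = SomeStorageValue::<T>::get();
+//!     // Non-deterministic work, such as an HTTP call, is allowed here by design.
+//!     if let Ok(pending) = http::Request::get("https://example.com/price").send() {
+//!         if let Ok(response) = pending.wait() {
+//!             let _body = response.body().collect::<Vec<u8>>();
+//!             // The only way to alter state is to submit an extrinsic back to the
+//!             // chain, e.g. via `frame_system::offchain::SubmitTransaction`.
+//!         }
+//!     }
+//! }
+//! ```
+//!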
Does it have to be there? can it not be a +//! simple, actual offchain application that lives outside of the chain's WASM blob? +//! +//! Some of the reasons why you might want to do the opposite, and actually embed an offchain worker +//! API into the WASM blob are: +//! +//! * Accessing the state is easier within the `offchain_worker` function, as it is already a part +//! of the runtime, and [`frame::pallet_macros::storage`] provides all the tools needed to read +//! the state. Other client libraries might provide varying degrees of capability here. +//! * It will be updated in synchrony with the runtime. A Substrate's offchain application is part +//! of the same WASM blob, and is therefore guaranteed to be up to date. +//! +//! For example, imagine you have modified a storage item to have a new type. This will possibly +//! require a [`crate::reference_docs::frame_runtime_upgrades_and_migrations`], and any offchain +//! code, such as a Polkadot-JS application, will have to be updated to reflect this change. Whereas +//! the WASM offchain worker code is guaranteed to already be updated, or else the runtime code will +//! not even compile. +//! +//! +//! ## Further References +//! +//! - +//! - +//! - [Offchain worker example](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/frame/examples/offchain-worker) diff --git a/docs/sdk/src/reference_docs/frame_origin.rs b/docs/sdk/src/reference_docs/frame_origin.rs index a4078377cd77dad6b3aac78ba6acdddda14251a8..49533157b014d77b3766cbfc8c3353397e0e48bf 100644 --- a/docs/sdk/src/reference_docs/frame_origin.rs +++ b/docs/sdk/src/reference_docs/frame_origin.rs @@ -1,14 +1,260 @@ //! # FRAME Origin //! -//! Notes: -//! -//! - Def talk about account abstraction and how it is a solved issue in frame. See Gav's talk in -//! Protocol Berg 2023 -//! - system's raw origin, how it is amalgamated with other origins into one type -//! [`frame_composite_enums`] -//! - signed origin -//! - unsigned origin, link to [`fee_less_runtime`] -//! - Root origin, how no one can obtain it. -//! - Abstract origin: how FRAME allows you to express "origin is 2/3 of the this body or 1/2 of -//! that body or half of the token holders". -//! - `type CustomOrigin: EnsureOrigin<_>` in pallets. +//! Let's start by clarifying a common wrong assumption about Origin: +//! +//! **ORIGIN IS NOT AN ACCOUNT ID**. +//! +//! FRAME's origin abstractions allow you to convey meanings far beyond just an account-id being the +//! caller of an extrinsic. Nonetheless, an account-id having signed an extrinsic is one of the +//! meanings that an origin can convey. This is the commonly used [`frame_system::ensure_signed`], +//! where the return value happens to be an account-id. +//! +//! Instead, let's establish the following as the correct definition of an origin: +//! +//! > The origin type represents the privilege level of the caller of an extrinsic. +//! +//! That is, an extrinsic, through checking the origin, can *express what privilege level it wishes +//! to impose on the caller of the extrinsic*. One of those checks can be as simple as "*any account +//! that has signed a statement can pass*". +//! +//! But the origin system can also express more abstract and complicated privilege levels. For +//! example: +//! +//! * If the majority of token holders agreed upon this. This is more or less what the +//! 
[`pallet_democracy`] does under the hood ([reference](https://github.com/paritytech/polkadot-sdk/blob/edd95b3749754d2ed0c5738588e872c87be91624/substrate/frame/democracy/src/lib.rs#L1603-L1633)). +//! * If a specific ratio of an instance of [`pallet_collective`]/DAO agrees upon this. +//! * If another consensus system, for example a bridged network or a parachain, agrees upon this. +//! * If the majority of validator/authority set agrees upon this[^1]. +//! * If caller holds a particular NFT. +//! +//! and many more. +//! +//! ## Context +//! +//! First, let's look at where the `origin` type is encountered in a typical pallet. The `origin: +//! OriginFor` has to be the first argument of any given callable extrinsic in FRAME: +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", call_simple)] +//! +//! Typically, the code of an extrinsic starts with an origin check, such as +//! [`frame_system::ensure_signed`]. +//! +//! Note that [`OriginFor`](frame_system::pallet_prelude::OriginFor) is merely a shorthand for +//! [`frame_system::Config::RuntimeOrigin`]. Given the name prefix `Runtime`, we can learn that +//! `RuntimeOrigin` is similar to `RuntimeCall` and others, a runtime composite enum that is +//! amalgamated at the runtime level. Read [`crate::reference_docs::frame_runtime_types`] to +//! familiarize yourself with these types. +//! +//! To understand this better, we will next create a pallet with a custom origin, which will add a +//! new variant to `RuntimeOrigin`. +//! +//! ## Adding Custom Pallet Origin to the Runtime +//! +//! For example, given a pallet that defines the following custom origin: +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", custom_origin)] +//! +//! And a runtime with the following pallets: +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", runtime_exp)] +//! +//! The type [`crate::reference_docs::frame_origin::runtime_for_origin::RuntimeOrigin`] is expanded. +//! This `RuntimeOrigin` contains a variant for the [`frame_system::RawOrigin`] and the custom +//! origin of the pallet. +//! +//! > Notice how the [`frame_system::ensure_signed`] is nothing more than a `match` statement. If +//! > you want to know where the actual origin of an extrinsic is set (and the signature +//! > verification happens, if any), see +//! > [`sp_runtime::generic::CheckedExtrinsic#trait-implementations`], specifically +//! > [`sp_runtime::traits::Applyable`]'s implementation. +//! +//! ## Asserting on a Custom Internal Origin +//! +//! In order to assert on a custom origin that is defined within your pallet, we need a way to first +//! convert the `::RuntimeOrigin` into the local `enum Origin` of the +//! current pallet. This is a common process that is explained in +//! [`crate::reference_docs::frame_runtime_types# +//! adding-further-constraints-to-runtime-composite-enums`]. +//! +//! We use the same process here to express that `RuntimeOrigin` has a number of additional bounds, +//! as follows. +//! +//! 1. Defining a custom `RuntimeOrigin` with further bounds in the pallet. +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", custom_origin_bound)] +//! +//! 2. Using it in the pallet. +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", custom_origin_usage)] +//! +//! ## Asserting on a Custom External Origin +//! +//! Very often, a pallet wants to have a parameterized origin that is **NOT** defined within the +//! pallet. In other words, a pallet wants to delegate an origin check to something that is +//! 
specified later at the runtime level. Like many other parameterizations in FRAME, this implies +//! adding a new associated type to `trait Config`. +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", external_origin_def)] +//! +//! Then, within the pallet, we can simply use this "unknown" origin check type: +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", external_origin_usage)] +//! +//! Finally, at the runtime, any implementation of [`frame::traits::EnsureOrigin`] can be passed. +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", external_origin_provide)] +//! +//! Indeed, some of these implementations of [`frame::traits::EnsureOrigin`] are similar to the ones +//! that we know about: [`frame::runtime::prelude::EnsureSigned`], +//! [`frame::runtime::prelude::EnsureSignedBy`], [`frame::runtime::prelude::EnsureRoot`], +//! [`frame::runtime::prelude::EnsureNone`], etc. But, there are also many more that are not known +//! to us, and are defined in other pallets. +//! +//! For example, [`pallet_collective`] defines [`pallet_collective::EnsureMember`] and +//! [`pallet_collective::EnsureProportionMoreThan`] and many more, which is exactly what we alluded +//! to earlier in this document. +//! +//! Make sure to check the full list of [implementors of +//! `EnsureOrigin`](frame::traits::EnsureOrigin#implementors) for more inspiration. +//! +//! ## Obtaining Abstract Origins +//! +//! So far we have learned that FRAME pallets can assert on custom and abstract origin types, +//! whether they are defined within the pallet or not. But how can we obtain these abstract origins? +//! +//! > All extrinsics that come from the outer world can generally only be obtained as either +//! > `signed` or `none` origin. +//! +//! Generally, these abstract origins are only obtained within the runtime, when a call is +//! dispatched within the runtime. +//! +//! ## Further References +//! +//! - [Gavin Wood's speech about FRAME features at Protocol Berg 2023.](https://youtu.be/j7b8Upipmeg?si=83_XUgYuJxMwWX4g&t=195) +//! - [A related StackExchange question.](https://substrate.stackexchange.com/questions/10992/how-do-you-find-the-public-key-for-the-medium-spender-track-origin) +//! +//! [^1]: Inherents are essentially unsigned extrinsics that need an [`frame_system::ensure_none`] +//! origin check, and through the virtue of being an inherent, are agreed upon by all validators. + +use frame::prelude::*; + +#[frame::pallet(dev_mode)] +pub mod pallet_for_origin { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[docify::export(call_simple)] + #[pallet::call] + impl Pallet { + pub fn do_something(_origin: OriginFor) -> DispatchResult { + // ^^^^^^^^^^^^^^^^^^^^^ + todo!(); + } + } +} + +#[frame::pallet(dev_mode)] +pub mod pallet_with_custom_origin { + use super::*; + + #[docify::export(custom_origin_bound)] + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeOrigin: From<::RuntimeOrigin> + + Into::RuntimeOrigin>>; + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[docify::export(custom_origin)] + /// A dummy custom origin. + #[pallet::origin] + #[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] + pub enum Origin { + /// If all holders of a particular NFT have agreed upon this. + AllNftHolders, + /// If all validators have agreed upon this. 
+ ValidatorSet, + } + + #[docify::export(custom_origin_usage)] + #[pallet::call] + impl Pallet { + pub fn only_validators(origin: OriginFor) -> DispatchResult { + // first, we convert from `::RuntimeOrigin` to `::RuntimeOrigin` + let local_runtime_origin = <::RuntimeOrigin as From< + ::RuntimeOrigin, + >>::from(origin); + // then we convert to `origin`, if possible + let local_origin = + local_runtime_origin.into().map_err(|_| "invalid origin type provided")?; + ensure!(matches!(local_origin, Origin::ValidatorSet), "Not authorized"); + todo!(); + } + } +} + +pub mod runtime_for_origin { + use super::pallet_with_custom_origin; + use frame::{runtime::prelude::*, testing_prelude::*}; + + #[docify::export(runtime_exp)] + construct_runtime!( + pub struct Runtime { + System: frame_system, + PalletWithCustomOrigin: pallet_with_custom_origin, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + } + + impl pallet_with_custom_origin::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + } +} + +#[frame::pallet(dev_mode)] +pub mod pallet_with_external_origin { + use super::*; + #[docify::export(external_origin_def)] + #[pallet::config] + pub trait Config: frame_system::Config { + type ExternalOrigin: EnsureOrigin; + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[docify::export(external_origin_usage)] + #[pallet::call] + impl Pallet { + pub fn externally_checked_ext(origin: OriginFor) -> DispatchResult { + let _ = T::ExternalOrigin::ensure_origin(origin)?; + todo!(); + } + } +} + +pub mod runtime_for_external_origin { + use super::*; + use frame::{runtime::prelude::*, testing_prelude::*}; + + construct_runtime!( + pub struct Runtime { + System: frame_system, + PalletWithExternalOrigin: pallet_with_external_origin, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + } + + #[docify::export(external_origin_provide)] + impl pallet_with_external_origin::Config for Runtime { + type ExternalOrigin = EnsureSigned<::AccountId>; + } +} diff --git a/docs/sdk/src/reference_docs/frame_runtime_migration.rs b/docs/sdk/src/reference_docs/frame_runtime_migration.rs deleted file mode 100644 index 0616ccbb6f57971823c7347a60574ef0c0bef2ab..0000000000000000000000000000000000000000 --- a/docs/sdk/src/reference_docs/frame_runtime_migration.rs +++ /dev/null @@ -1,9 +0,0 @@ -//! # Runtime Runtime Upgrade and Testing -//! -//! -//! Notes: -//! -//! - Flow of things, when does `on_runtime_upgrade` get called. Link to to `Hooks` and its diagram -//! as source of truth. -//! - Data migration and when it is needed. -//! - Look into the pba-lecture. diff --git a/docs/sdk/src/reference_docs/frame_runtime_types.rs b/docs/sdk/src/reference_docs/frame_runtime_types.rs new file mode 100644 index 0000000000000000000000000000000000000000..883892729d1c1fab97f58a69bc44546d0cee5dae --- /dev/null +++ b/docs/sdk/src/reference_docs/frame_runtime_types.rs @@ -0,0 +1,306 @@ +//! # FRAME Runtime Types +//! +//! This reference document briefly explores the idea around types generated at the runtime level by +//! the FRAME macros. +//! +//! > As of now, many of these important types are generated within the internals of +//! > [`construct_runtime`], and there is no easy way for you to visually know they exist. +//! 
> [#polkadot-sdk#1378](https://github.com/paritytech/polkadot-sdk/pull/1378) is meant to +//! > significantly improve this. Exploring the rust-docs of a runtime, such as [`runtime`] which is +//! > defined in this module is as of now the best way to learn about these types. +//! +//! ## Composite Enums +//! +//! Many types within a FRAME runtime follow the following structure: +//! +//! * Each individual pallet defines a type, for example `Foo`. +//! * At the runtime level, these types are amalgamated into a single type, for example +//! `RuntimeFoo`. +//! +//! As the names suggest, all composite enums in a FRAME runtime start their name with `Runtime`. +//! For example, `RuntimeCall` is a representation of the most high level `Call`-able type in the +//! runtime. +//! +//! Composite enums are generally convertible to their individual parts as such: +#![doc = simple_mermaid::mermaid!("../../../mermaid/outer_runtime_types.mmd")] +//! +//! In that one can always convert from the inner type into the outer type, but not vice versa. This +//! is usually expressed by implementing `From`, `TryFrom`, `From>` and similar traits. +//! +//! ### Example +//! +//! We provide the following two pallets: [`pallet_foo`] and [`pallet_bar`]. Each define a +//! dispatchable, and `Foo` also defines a custom origin. Lastly, `Bar` defines an additional +//! `GenesisConfig`. +#![doc = docify::embed!("./src/reference_docs/frame_runtime_types.rs", pallet_foo)] +#![doc = docify::embed!("./src/reference_docs/frame_runtime_types.rs", pallet_bar)] +//! +//! Let's explore how each of these affect the [`RuntimeCall`], [`RuntimeOrigin`] and +//! [`RuntimeGenesisConfig`] generated in [`runtime`] by respectively. +//! +//! As observed, [`RuntimeCall`] has 3 variants, one for each pallet and one for `frame_system`. If +//! you explore further, you will soon realize that each variant is merely a pointer to the `Call` +//! type in each pallet, for example [`pallet_foo::Call`]. +//! +//! [`RuntimeOrigin`]'s [`OriginCaller`] has two variants, one for system, and one for `pallet_foo` +//! which utilized [`frame::pallet_macros::origin`]. +//! +//! Finally, [`RuntimeGenesisConfig`] is composed of `frame_system` and a variant for `pallet_bar`'s +//! [`pallet_bar::GenesisConfig`]. +//! +//! You can find other composite enums by scanning [`runtime`] for other types who's name starts +//! with `Runtime`. Some of the more noteworthy ones are: +//! +//! - [`RuntimeEvent`] +//! - [`RuntimeError`] +//! - [`RuntimeHoldReason`] +//! +//! ### Adding Further Constraints to Runtime Composite Enums +//! +//! This section explores a common scenario where a pallet has access to one of these runtime +//! composite enums, but it wishes to further specify it by adding more trait bounds to it. +//! +//! Let's take the example of `RuntimeCall`. This is an associated type in +//! [`frame_system::Config::RuntimeCall`], and all pallets have access to this type, because they +//! have access to [`frame_system::Config`]. Finally, this type is meant to be set to outer call of +//! the entire runtime. +//! +//! But, let's not forget that this is information that *we know*, and the Rust compiler does not. +//! All that the rust compiler knows about this type is *ONLY* what the trait bounds of +//! [`frame_system::Config::RuntimeCall`] are specifying: +#![doc = docify::embed!("../../substrate/frame/system/src/lib.rs", system_runtime_call)] +//! +//! So, when at a given pallet, one accesses `::RuntimeCall`, the type is +//! 
extremely opaque from the perspective of the Rust compiler. +//! +//! How can a pallet access the `RuntimeCall` type with further constraints? For example, each +//! pallet has its own `enum Call`, and knows that its local `Call` is a part of `RuntimeCall`, +//! therefore there should be a `impl From> for RuntimeCall`. +//! +//! The only way to express this using Rust's associated types is for the pallet to **define its own +//! associated type `RuntimeCall`, and further specify what it thinks `RuntimeCall` should be**. +//! +//! In this case, we will want to assert the existence of [`frame::traits::IsSubType`], which is +//! very similar to [`TryFrom`]. +#![doc = docify::embed!("./src/reference_docs/frame_runtime_types.rs", custom_runtime_call)] +//! +//! And indeed, at the runtime level, this associated type would be the same `RuntimeCall` that is +//! passed to `frame_system`. +#![doc = docify::embed!("./src/reference_docs/frame_runtime_types.rs", pallet_with_specific_runtime_call_impl)] +//! +//! > In other words, the degree of specificity that [`frame_system::Config::RuntimeCall`] has is +//! > not enough for the pallet to work with. Therefore, the pallet has to define its own associated +//! > type representing `RuntimeCall`. +//! +//! Another way to look at this is: +//! +//! `pallet_with_specific_runtime_call::Config::RuntimeCall` and `frame_system::Config::RuntimeCall` +//! are two different representations of the same concrete type that is only known when the runtime +//! is being constructed. +//! +//! Now, within this pallet, this new `RuntimeCall` can be used, and it can use its new trait +//! bounds, such as being [`frame::traits::IsSubType`]: +#![doc = docify::embed!("./src/reference_docs/frame_runtime_types.rs", custom_runtime_call_usages)] +//! +//! ### Asserting Equality of Multiple Runtime Composite Enums +//! +//! Recall that in the above example, `::RuntimeCall` and `::RuntimeCall` are expected to be equal types, but at the compile-time we +//! have to represent them with two different associated types with different bounds. Would it not +//! be cool if we had a test to make sure they actually resolve to the same concrete type once the +//! runtime is constructed? The following snippet exactly does that: +#![doc = docify::embed!("./src/reference_docs/frame_runtime_types.rs", assert_equality)] +//! +//! We leave it to the reader to further explore what [`frame::traits::Hooks::integrity_test`] is, +//! and what [`core::any::TypeId`] is. Another way to assert this is using +//! [`frame::traits::IsType`]. +//! +//! ## Type Aliases +//! +//! A number of type aliases are generated by the `construct_runtime` which are also noteworthy: +//! +//! * [`runtime::PalletFoo`] is an alias to [`pallet_foo::Pallet`]. Same for `PalletBar`, and +//! `System` +//! * [`runtime::AllPalletsWithSystem`] is an alias for a tuple of all of the above. This type is +//! important to FRAME internals such as `executive`, as it implements traits such as +//! [`frame::traits::Hooks`]. +//! +//! ## Further Details +//! +//! * [`crate::reference_docs::frame_origin`] explores further details about the usage of +//! `RuntimeOrigin`. +//! * [`RuntimeCall`] is a particularly interesting composite enum as it dictates the encoding of an +//! extrinsic. See [`crate::reference_docs::transaction_extensions`] for more information. +//! * See the documentation of [`construct_runtime`]. +//! 
* See the corresponding lecture in the [pba-book](https://polkadot-blockchain-academy.github.io/pba-book/frame/outer-enum/page.html). +//! +//! +//! [`construct_runtime`]: frame::runtime::prelude::construct_runtime +//! [`runtime::PalletFoo`]: crate::reference_docs::frame_runtime_types::runtime::PalletFoo +//! [`runtime::AllPalletsWithSystem`]: crate::reference_docs::frame_runtime_types::runtime::AllPalletsWithSystem +//! [`runtime`]: crate::reference_docs::frame_runtime_types::runtime +//! [`pallet_foo`]: crate::reference_docs::frame_runtime_types::pallet_foo +//! [`pallet_foo::Call`]: crate::reference_docs::frame_runtime_types::pallet_foo::Call +//! [`pallet_foo::Pallet`]: crate::reference_docs::frame_runtime_types::pallet_foo::Pallet +//! [`pallet_bar`]: crate::reference_docs::frame_runtime_types::pallet_bar +//! [`pallet_bar::GenesisConfig`]: crate::reference_docs::frame_runtime_types::pallet_bar::GenesisConfig +//! [`RuntimeEvent`]: crate::reference_docs::frame_runtime_types::runtime::RuntimeEvent +//! [`RuntimeGenesisConfig`]: +//! crate::reference_docs::frame_runtime_types::runtime::RuntimeGenesisConfig +//! [`RuntimeOrigin`]: crate::reference_docs::frame_runtime_types::runtime::RuntimeOrigin +//! [`OriginCaller`]: crate::reference_docs::frame_runtime_types::runtime::OriginCaller +//! [`RuntimeError`]: crate::reference_docs::frame_runtime_types::runtime::RuntimeError +//! [`RuntimeCall`]: crate::reference_docs::frame_runtime_types::runtime::RuntimeCall +//! [`RuntimeHoldReason`]: crate::reference_docs::frame_runtime_types::runtime::RuntimeHoldReason + +use frame::prelude::*; + +#[docify::export] +#[frame::pallet(dev_mode)] +pub mod pallet_foo { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::origin] + #[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] + pub enum Origin { + A, + B, + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::call] + impl Pallet { + pub fn foo(_origin: OriginFor) -> DispatchResult { + todo!(); + } + + pub fn other(_origin: OriginFor) -> DispatchResult { + todo!(); + } + } +} + +#[docify::export] +#[frame::pallet(dev_mode)] +pub mod pallet_bar { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::genesis_config] + #[derive(DefaultNoBound)] + pub struct GenesisConfig { + pub initial_account: Option, + } + + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) {} + } + + #[pallet::call] + impl Pallet { + pub fn bar(_origin: OriginFor) -> DispatchResult { + todo!(); + } + } +} + +pub mod runtime { + use super::{pallet_bar, pallet_foo}; + use frame::{runtime::prelude::*, testing_prelude::*}; + + #[docify::export(runtime_exp)] + construct_runtime!( + pub struct Runtime { + System: frame_system, + PalletFoo: pallet_foo, + PalletBar: pallet_bar, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + } + + impl pallet_foo::Config for Runtime {} + impl pallet_bar::Config for Runtime {} +} + +#[frame::pallet(dev_mode)] +pub mod pallet_with_specific_runtime_call { + use super::*; + use frame::traits::IsSubType; + + #[docify::export(custom_runtime_call)] + /// A pallet that wants to further narrow down what `RuntimeCall` is. 
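+ /// Here, the additional `IsSubType` bound lets the pallet check whether any given
+ /// `RuntimeCall` is in fact one of its own `Call` variants.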
+ #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeCall: IsSubType>; + } + + #[pallet::pallet] + pub struct Pallet(_); + + // note that this pallet needs some `call` to have a `enum Call`. + #[pallet::call] + impl Pallet { + pub fn foo(_origin: OriginFor) -> DispatchResult { + todo!(); + } + } + + #[docify::export(custom_runtime_call_usages)] + impl Pallet { + fn _do_something_useful_with_runtime_call(call: ::RuntimeCall) { + // check if the runtime call given is of this pallet's variant. + let _maybe_my_call: Option<&Call> = call.is_sub_type(); + todo!(); + } + } + + #[docify::export(assert_equality)] + #[pallet::hooks] + impl Hooks> for Pallet { + fn integrity_test() { + use core::any::TypeId; + assert_eq!( + TypeId::of::<::RuntimeCall>(), + TypeId::of::<::RuntimeCall>() + ); + } + } +} + +pub mod runtime_with_specific_runtime_call { + use super::pallet_with_specific_runtime_call; + use frame::{runtime::prelude::*, testing_prelude::*}; + + construct_runtime!( + pub struct Runtime { + System: frame_system, + PalletWithSpecificRuntimeCall: pallet_with_specific_runtime_call, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + } + + #[docify::export(pallet_with_specific_runtime_call_impl)] + impl pallet_with_specific_runtime_call::Config for Runtime { + // an implementation of `IsSubType` is provided by `construct_runtime`. + type RuntimeCall = RuntimeCall; + } +} diff --git a/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs b/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs new file mode 100644 index 0000000000000000000000000000000000000000..7d870b432218e9e9a06c25f4f19e1f07ca72c6c6 --- /dev/null +++ b/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs @@ -0,0 +1,138 @@ +//! # Runtime Upgrades +//! +//! At their core, blockchain logic consists of +//! +//! 1. on-chain state and +//! 2. a state transition function +//! +//! In Substrate-based blockchains, state transition functions are referred to as +//! [runtimes](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/blockchain_state_machines/index.html). +//! +//! Traditionally, before Substrate, upgrading state transition functions required node +//! operators to download new software and restart their nodes in a process called +//! [forking](https://en.wikipedia.org/wiki/Fork_(blockchain)). +//! +//! Substrate-based blockchains do not require forking, and instead upgrade runtimes +//! in a process called "Runtime Upgrades". +//! +//! Forkless runtime upgrades are a defining feature of the Substrate framework. Updating the +//! runtime logic without forking the code base enables your blockchain to seemlessly evolve +//! over time in a deterministic, rules-based manner. It also removes ambiguity for node operators +//! and other participants in the network about what is the canonical runtime. +//! +//! This capability is possible due to the runtime of a blockchain existing in on-chain storage. +//! +//! ## Performing a Runtime Upgrade +//! +//! To upgrade a runtime, an [`Origin`](frame_system::RawOrigin) with the necesarry permissions +//! (usually via governance) changes the `:code` storage. Usually, this is performed via a call to +//! [`set_code`] (or [`set_code_without_checks`]) with the desired new runtime blob, scheduled +//! using [`pallet_scheduler`]. +//! +//! 
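+//! As a rough illustration, the upgrade itself is nothing more than a dispatch of `set_code`
+//! from the Root origin, with the new Wasm blob as its argument (how that dispatch is reached,
+//! directly via governance or wrapped in a scheduler call, differs per chain):
+//!
+//! ```ignore
+//! // Hypothetical helper returning the compressed Wasm blob of the *new* runtime.
+//! let code: Vec<u8> = new_runtime_wasm();
+//! // The call that performs the upgrade; it must be dispatched from the Root origin.
+//! let call = RuntimeCall::System(frame_system::Call::set_code { code });
+//! ```
+//!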
Prior to building the new runtime, don't forget to update the +//! [`RuntimeVersion`](sp_version::RuntimeVersion). +//! +//! # Migrations +//! +//! It is often desirable to define logic to execute immediately after runtime upgrades (see +//! [this diagram](frame::traits::Hooks)). +//! +//! Self-contained pieces of logic that execute after a runtime upgrade are called "Migrations". +//! +//! The typical use case of a migration is to 'migrate' pallet storage from one layout to another, +//! for example when the encoding of a storage item is changed. However, they can also execute +//! arbitary logic such as: +//! +//! - Calling arbitrary pallet methods +//! - Mutating arbitrary on-chain state +//! - Cleaning up some old storage items that are no longer needed +//! +//! ## Single Block Migrations +//! +//! - Execute immediately and entirely at the beginning of the block following +//! a runtime upgrade. +//! - Are suitable for migrations which are guaranteed to not exceed the block weight. +//! - Are simply implementations of [`OnRuntimeUpgrade`]. +//! +//! To learn best practices for writing single block pallet storage migrations, see the +//! [Single Block Migration Example Pallet](pallet_example_single_block_migrations). +//! +//! ### Scheduling the Single Block Migrations to Run Next Runtime Upgrade +//! +//! Schedule migrations to run next runtime upgrade passing them as a generic parameter to your +//! [`Executive`](frame_executive) pallet: +//! +//! ```ignore +//! /// Tuple of migrations (structs that implement `OnRuntimeUpgrade`) +//! type Migrations = ( +//! pallet_example_storage_migration::migrations::v1::versioned::MigrateV0ToV1, +//! MyCustomMigration, +//! // ...more migrations here +//! ); +//! pub type Executive = frame_executive::Executive< +//! Runtime, +//! Block, +//! frame_system::ChainContext, +//! Runtime, +//! AllPalletsWithSystem, +//! Migrations, // <-- pass your migrations to Executive here +//! >; +//! ``` +//! +//! ### Ensuring Single Block Migration Safety +//! +//! "My migration unit tests pass, so it should be safe to deploy right?" +//! +//! No! Unit tests execute the migration in a very simple test environment, and cannot account +//! for the complexities of a real runtime or real on-chain state. +//! +//! Prior to deploying migrations, it is critical to perform additional checks to ensure that when +//! run in our real runtime they will not brick the chain due to: +//! - Panicing +//! - Touching too many storage keys and resulting in an excessively large PoV +//! - Taking too long to execute +//! +//! [`try-runtime-cli`](https://github.com/paritytech/try-runtime-cli) has a sub-command +//! [`on-runtime-upgrade`](https://paritytech.github.io/try-runtime-cli/try_runtime_core/commands/enum.Action.html#variant.OnRuntimeUpgrade) +//! which is designed to help with exactly this. +//! +//! Developers MUST run this command before deploying migrations to ensure they will not +//! inadvertently result in a bricked chain. +//! +//! It is recommended to run as part of your CI pipeline. See the +//! [polkadot-sdk check-runtime-migration job](https://github.com/paritytech/polkadot-sdk/blob/4a293bc5a25be637c06ce950a34490706597615b/.gitlab/pipeline/check.yml#L103-L124) +//! for an example of how to configure this. +//! +//! ### Note on the Manipulability of PoV Size and Execution Time +//! +//! While [`try-runtime-cli`](https://github.com/paritytech/try-runtime-cli) can help ensure with +//! 
very high certainty that a migration will succeed given **existing** on-chain state, it cannot +//! prevent a malicious actor from manipulating state in a way that will cause the migration to take +//! longer or produce a PoV much larger than previously measured. +//! +//! Therefore, it is important to write migrations in such a way that the execution time or PoV size +//! it adds to the block cannot be easily manipulated. e.g., do not iterate over storage that can +//! quickly or cheaply be bloated. +//! +//! If writing your migration in such a way is not possible, a multi block migration should be used +//! instead. +//! +//! ### Other useful tools +//! +//! [`Chopsticks`](https://github.com/AcalaNetwork/chopsticks) is another tool in the Substrate +//! ecosystem which developers may find useful to use in addition to `try-runtime-cli` when testing +//! their single block migrations. +//! +//! ## Multi Block Migrations +//! +//! Safely and easily execute long-running migrations across multiple blocks. +//! +//! Suitable for migrations which could use arbitrary amounts of block weight. +//! +//! TODO: Link to multi block migration example/s once PR is merged (). +//! +//! [`GetStorageVersion`]: frame_support::traits::GetStorageVersion +//! [`OnRuntimeUpgrade`]: frame_support::traits::OnRuntimeUpgrade +//! [`StorageVersion`]: frame_support::traits::StorageVersion +//! [`set_code`]: frame_system::Call::set_code +//! [`set_code_without_checks`]: frame_system::Call::set_code_without_checks diff --git a/docs/sdk/src/reference_docs/mod.rs b/docs/sdk/src/reference_docs/mod.rs index c16122ee4287b17614f5d61acc9f26df0429c2cd..3fd815d2a159944efcf4bc44de347aa38bfad546 100644 --- a/docs/sdk/src/reference_docs/mod.rs +++ b/docs/sdk/src/reference_docs/mod.rs @@ -39,20 +39,20 @@ pub mod runtime_vs_smart_contract; /// Learn about how extrinsics are encoded to be transmitted to a node and stored in blocks. pub mod extrinsic_encoding; -/// Learn about the signed extensions that form a part of extrinsics. +/// Learn about the transaction extensions that form a part of extrinsics. // TODO: @jsdw https://github.com/paritytech/polkadot-sdk-docs/issues/42 -pub mod signed_extensions; +pub mod transaction_extensions; -/// Learn about *"Origin"* A topic in FRAME that enables complex account abstractions to be built. -// TODO: @shawntabrizi https://github.com/paritytech/polkadot-sdk-docs/issues/43 +/// Learn about *Origins*, a topic in FRAME that enables complex account abstractions to be built. pub mod frame_origin; /// Learn about how to write safe and defensive code in your FRAME runtime. // TODO: @CrackTheCode016 https://github.com/paritytech/polkadot-sdk-docs/issues/44 pub mod safe_defensive_programming; -/// Learn about composite enums in FRAME-based runtimes, such as "RuntimeEvent" and "RuntimeCall". -pub mod frame_composite_enums; +/// Learn about composite enums and other runtime level types, such as "RuntimeEvent" and +/// "RuntimeCall". +pub mod frame_runtime_types; /// Learn about how to make a pallet/runtime that is fee-less and instead uses another mechanism to /// control usage and sybil attacks. @@ -92,11 +92,14 @@ pub mod cli; // TODO: @JoshOrndorff @kianenigma https://github.com/paritytech/polkadot-sdk-docs/issues/54 pub mod consensus_swapping; -/// Learn about all the advance ways to test your coordinate a rutnime upgrade and data migration. 
-// TODO: @liamaharon https://github.com/paritytech/polkadot-sdk-docs/issues/55 -pub mod frame_runtime_migration; +/// Learn about Runtime Upgrades and best practices for writing Migrations. +pub mod frame_runtime_upgrades_and_migrations; /// Learn about light nodes, how they function, and how Substrate-based chains come /// light-node-first out of the box. // TODO: @jsdw @josepot https://github.com/paritytech/polkadot-sdk-docs/issues/68 pub mod light_nodes; + +/// Learn about the offchain workers, how they function, and how to use them, as provided by the +/// [`frame`] APIs. +pub mod frame_offchain_workers; diff --git a/docs/sdk/src/reference_docs/signed_extensions.rs b/docs/sdk/src/reference_docs/signed_extensions.rs deleted file mode 100644 index 28b1426536bcdf371865db74a104a27d51869c86..0000000000000000000000000000000000000000 --- a/docs/sdk/src/reference_docs/signed_extensions.rs +++ /dev/null @@ -1,79 +0,0 @@ -//! Signed extensions are, briefly, a means for different chains to extend the "basic" extrinsic -//! format with custom data that can be checked by the runtime. -//! -//! # Example -//! -//! Defining a couple of very simple signed extensions looks like the following: -#![doc = docify::embed!("./src/reference_docs/signed_extensions.rs", signed_extensions_example)] - -#[docify::export] -pub mod signed_extensions_example { - use parity_scale_codec::{Decode, Encode}; - use scale_info::TypeInfo; - use sp_runtime::traits::SignedExtension; - - // This doesn't actually check anything, but simply allows - // some arbitrary `u32` to be added to the extrinsic payload - #[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] - pub struct AddToPayload(pub u32); - - impl SignedExtension for AddToPayload { - const IDENTIFIER: &'static str = "AddToPayload"; - type AccountId = (); - type Call = (); - type AdditionalSigned = (); - type Pre = (); - - fn additional_signed( - &self, - ) -> Result< - Self::AdditionalSigned, - sp_runtime::transaction_validity::TransactionValidityError, - > { - Ok(()) - } - - fn pre_dispatch( - self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &sp_runtime::traits::DispatchInfoOf, - _len: usize, - ) -> Result { - Ok(()) - } - } - - // This is the opposite; nothing will be added to the extrinsic payload, - // but the AdditionalSigned type (`1234u32`) will be added to the - // payload to be signed. - #[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] - pub struct AddToSignaturePayload; - - impl SignedExtension for AddToSignaturePayload { - const IDENTIFIER: &'static str = "AddToSignaturePayload"; - type AccountId = (); - type Call = (); - type AdditionalSigned = u32; - type Pre = (); - - fn additional_signed( - &self, - ) -> Result< - Self::AdditionalSigned, - sp_runtime::transaction_validity::TransactionValidityError, - > { - Ok(1234) - } - - fn pre_dispatch( - self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &sp_runtime::traits::DispatchInfoOf, - _len: usize, - ) -> Result { - Ok(()) - } - } -} diff --git a/docs/sdk/src/reference_docs/transaction_extensions.rs b/docs/sdk/src/reference_docs/transaction_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..4f96d665d9bd2745de7b9911bd06cb059c62e9b4 --- /dev/null +++ b/docs/sdk/src/reference_docs/transaction_extensions.rs @@ -0,0 +1,57 @@ +//! Transaction extensions are, briefly, a means for different chains to extend the "basic" +//! extrinsic format with custom data that can be checked by the runtime. +//! +//! # Example +//! +//! 
Defining a couple of very simple transaction extensions looks like the following: +#![doc = docify::embed!("./src/reference_docs/transaction_extensions.rs", transaction_extensions_example)] + +#[docify::export] +pub mod transaction_extensions_example { + use parity_scale_codec::{Decode, Encode}; + use scale_info::TypeInfo; + use sp_runtime::{ + impl_tx_ext_default, + traits::{Dispatchable, TransactionExtension, TransactionExtensionBase}, + TransactionValidityError, + }; + + // This doesn't actually check anything, but simply allows + // some arbitrary `u32` to be added to the extrinsic payload + #[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] + pub struct AddToPayload(pub u32); + + impl TransactionExtensionBase for AddToPayload { + const IDENTIFIER: &'static str = "AddToPayload"; + type Implicit = (); + } + + impl TransactionExtension for AddToPayload { + type Pre = (); + type Val = (); + + impl_tx_ext_default!(Call; (); validate prepare); + } + + // This is the opposite; nothing will be added to the extrinsic payload, + // but the Implicit type (`1234u32`) will be added to the + // payload to be signed. + #[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] + pub struct AddToSignaturePayload; + + impl TransactionExtensionBase for AddToSignaturePayload { + const IDENTIFIER: &'static str = "AddToSignaturePayload"; + type Implicit = u32; + + fn implicit(&self) -> Result { + Ok(1234) + } + } + + impl TransactionExtension for AddToSignaturePayload { + type Pre = (); + type Val = (); + + impl_tx_ext_default!(Call; (); validate prepare); + } +} diff --git a/polkadot/doc/testing.md b/polkadot/doc/testing.md deleted file mode 100644 index 76703b1b4398a0cac8423183a5d2febfabab0e6c..0000000000000000000000000000000000000000 --- a/polkadot/doc/testing.md +++ /dev/null @@ -1,302 +0,0 @@ -# Testing - -Testing is an essential tool to assure correctness. This document describes how we test the Polkadot code, whether -locally, at scale, and/or automatically in CI. - -## Scopes - -The testing strategy for Polkadot is 4-fold: - -### Unit testing (1) - -Boring, small scale correctness tests of individual functions. It is usually -enough to run `cargo test` in the crate you are testing. - -For full coverage you may have to pass some additional features. For example: - -```sh -cargo test --features ci-only-tests -``` - -### Integration tests - -There are the following variants of integration tests: - -#### Subsystem tests (2) - -One particular subsystem (subsystem under test) interacts with a mocked overseer that is made to assert incoming and -outgoing messages of the subsystem under test. See e.g. the `statement-distribution` tests. - -#### Behavior tests (3) - -Launching small scale networks, with multiple adversarial nodes. This should include tests around the thresholds in -order to evaluate the error handling once certain assumed invariants fail. - -Currently, we commonly use **zombienet** to run mini test-networks, whether locally or in CI. To run on your machine: - -- First, make sure you have [zombienet][zombienet] installed. - -- Now, all the required binaries must be installed in your $PATH. You must run the following from the `polkadot/` -directory in order to test your changes. (Not `zombienet setup`, or you will get the released binaries without your -local changes!) - -```sh -cargo install --path . --locked -``` - -- You will also need to install whatever binaries are required for your specific tests. 
For example, to install -`undying-collator`, from `polkadot/`, run: - -```sh -cargo install --path ./parachain/test-parachains/undying/collator --locked -``` - -- Finally, run the zombienet test from the `polkadot` directory: - -```sh -RUST_LOG=parachain::pvf=trace zombienet --provider=native spawn zombienet_tests/functional/0001-parachains-pvf.toml -``` - -- You can pick a validator node like `alice` from the output and view its logs -(`tail -f `) or metrics. Make sure there is nothing funny in the logs -(try `grep WARN `). - -#### Testing at scale (4) - -Launching many nodes with configurable network speed and node features in a cluster of nodes. At this scale the -[Simnet][simnet] comes into play which launches a full cluster of nodes. The scale is handled by spawning a kubernetes -cluster and the meta description is covered by [Gurke][Gurke]. Asserts are made using Grafana rules, based on the -existing prometheus metrics. This can be extended by adding an additional service translating `jaeger` spans into -addition prometheus avoiding additional Polkadot source changes. - -_Behavior tests_ and _testing at scale_ have naturally soft boundary. The most significant difference is the presence of -a real network and the number of nodes, since a single host often not capable to run multiple nodes at once. - -## Observing Logs - -To verify expected behavior it's often useful to observe logs. To avoid too many -logs at once, you can run one test at a time: - -1. Add `sp_tracing::try_init_simple();` to the beginning of a test -2. Specify `RUST_LOG=::=trace` before the cargo command. - -For example: - -```sh -RUST_LOG=parachain::pvf=trace cargo test execute_can_run_serially -``` - -For more info on how our logs work, check [the docs][logs]. - -## Coverage - -Coverage gives a _hint_ of the actually covered source lines by tests and test applications. - -The state of the art is currently tarpaulin which unfortunately yields a lot of false negatives. Lines that -are in fact covered, marked as uncovered due to a mere linebreak in a statement can cause these artifacts. This leads to -lower coverage percentages than there actually is. - -Since late 2020 rust has gained [MIR based coverage tooling]( -https://blog.rust-lang.org/inside-rust/2020/11/12/source-based-code-coverage.html). - -```sh -# setup -rustup component add llvm-tools-preview -cargo install grcov miniserve - -export CARGO_INCREMENTAL=0 -# wasm is not happy with the instrumentation -export SKIP_BUILD_WASM=true -export BUILD_DUMMY_WASM_BINARY=true -# the actully collected coverage data -export LLVM_PROFILE_FILE="llvmcoveragedata-%p-%m.profraw" -# build wasm without instrumentation -export WASM_TARGET_DIRECTORY=/tmp/wasm -cargo +nightly build -# required rust flags -export RUSTFLAGS="-Zinstrument-coverage" -# assure target dir is clean -rm -r target/{debug,tests} -# run tests to get coverage data -cargo +nightly test --all - -# create the *html* report out of all the test binaries -# mostly useful for local inspection -grcov . --binary-path ./target/debug -s . -t html --branch --ignore-not-existing -o ./coverage/ -miniserve -r ./coverage - -# create a *codecov* compatible report -grcov . --binary-path ./target/debug/ -s . -t lcov --branch --ignore-not-existing --ignore "/*" -o lcov.info -``` - -The test coverage in `lcov` can the be published to . - -```sh -bash <(curl -s https://codecov.io/bash) -f lcov.info -``` - -or just printed as part of the PR using a github action i.e. 
-[`jest-lcov-reporter`](https://github.com/marketplace/actions/jest-lcov-reporter). - -For full examples on how to use [`grcov` /w Polkadot specifics see the github -repo](https://github.com/mozilla/grcov#coverallscodecov-output). - -## Fuzzing - -Fuzzing is an approach to verify correctness against arbitrary or partially structured inputs. - -Currently implemented fuzzing targets: - -- `erasure-coding` - -The tooling of choice here is `honggfuzz-rs` as it allows _fastest_ coverage according to "some paper" which is a -positive feature when run as part of PRs. - -Fuzzing is generally not applicable for data secured by cryptographic hashes or signatures. Either the input has to be -specifically crafted, such that the discarded input percentage stays in an acceptable range. System level fuzzing is -hence simply not feasible due to the amount of state that is required. - -Other candidates to implement fuzzing are: - -- `rpc` -- ... - -## Performance metrics - -There are various ways of performance metrics. - -- timing with `criterion` -- cache hits/misses w/ `iai` harness or `criterion-perf` -- `coz` a performance based compiler - -Most of them are standard tools to aid in the creation of statistical tests regarding change in time of certain unit -tests. - -`coz` is meant for runtime. In our case, the system is far too large to yield a sufficient number of measurements in -finite time. An alternative approach could be to record incoming package streams per subsystem and store dumps of them, -which in return could be replayed repeatedly at an accelerated speed, with which enough metrics could be obtained to -yield information on which areas would improve the metrics. This unfortunately will not yield much information, since -most if not all of the subsystem code is linear based on the input to generate one or multiple output messages, it is -unlikely to get any useful metrics without mocking a sufficiently large part of the other subsystem which overlaps with -[#Integration tests] which is unfortunately not repeatable as of now. As such the effort gain seems low and this is not -pursued at the current time. - -## Writing small scope integration tests with preconfigured workers - -Requirements: - -- spawn nodes with preconfigured behaviors -- allow multiple types of configuration to be specified -- allow extendability via external crates -- ... - ---- - -## Implementation of different behavior strain nodes - -### Goals - -The main goals are is to allow creating a test node which exhibits a certain behavior by utilizing a subset of _wrapped_ -or _replaced_ subsystems easily. The runtime must not matter at all for these tests and should be simplistic. The -execution must be fast, this mostly means to assure a close to zero network latency as well as shorting the block time -and epoch times down to a few `100ms` and a few dozend blocks per epoch. - -### Approach - -#### MVP - -A simple small scale builder pattern would suffice for stage one implementation of allowing to replace individual -subsystems. An alternative would be to harness the existing `AllSubsystems` type and replace the subsystems as needed. - -#### Full `proc-macro` implementation - -`Overseer` is a common pattern. It could be extracted as `proc` macro and generative `proc-macro`. This would replace -the `AllSubsystems` type as well as implicitly create the `AllMessages` enum as `AllSubsystemsGen` does today. 
- -The implementation is yet to be completed, see the [implementation PR](https://github.com/paritytech/polkadot/pull/2962) -for details. - -##### Declare an overseer implementation - -```rust -struct BehaveMaleficient; - -impl OverseerGen for BehaveMaleficient { - fn generate<'a, Spawner, RuntimeClient>( - &self, - args: OverseerGenArgs<'a, Spawner, RuntimeClient>, - ) -> Result<(Overseer>, OverseerHandler), Error> - where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, - Spawner: 'static + overseer::gen::Spawner + Clone + Unpin, - { - let spawner = args.spawner.clone(); - let leaves = args.leaves.clone(); - let runtime_client = args.runtime_client.clone(); - let registry = args.registry.clone(); - let candidate_validation_config = args.candidate_validation_config.clone(); - // modify the subsystem(s) as needed: - let all_subsystems = create_default_subsystems(args)?. - // or spawn an entirely new set - - replace_candidate_validation( - // create the filtered subsystem - FilteredSubsystem::new( - CandidateValidationSubsystem::with_config( - candidate_validation_config, - Metrics::register(registry)?, - ), - // an implementation of - Skippy::default(), - ), - ); - - Overseer::new(leaves, all_subsystems, registry, runtime_client, spawner) - .map_err(|e| e.into()) - - // A builder pattern will simplify this further - // WIP https://github.com/paritytech/polkadot/pull/2962 - } -} - -fn main() -> eyre::Result<()> { - color_eyre::install()?; - let cli = Cli::from_args(); - assert_matches::assert_matches!(cli.subcommand, None); - polkadot_cli::run_node(cli, BehaveMaleficient)?; - Ok(()) -} -``` - -[`variant-a`](../node/malus/src/variant-a.rs) is a fully working example. - -#### Simnet - -Spawn a kubernetes cluster based on a meta description using [Gurke] with the [Simnet] scripts. - -Coordinated attacks of multiple nodes or subsystems must be made possible via a side-channel, that is out of scope for -this document. - -The individual node configurations are done as targets with a particular builder configuration. - -#### Behavior tests w/o Simnet - -Commonly this will require multiple nodes, and most machines are limited to running two or three nodes concurrently. -Hence, this is not the common case and is just an implementation _idea_. 
- -```rust -behavior_testcase!{ -"TestRuntime" => -"Alice": , -"Bob": , -"Charles": Default, -"David": "Charles", -"Eve": "Bob", -} -``` - -[zombienet]: https://github.com/paritytech/zombienet -[Gurke]: https://github.com/paritytech/gurke -[simnet]: https://github.com/paritytech/simnet_scripts -[logs]: https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/node/gum/src/lib.rs diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index 456ae319787b07740bb0817dcfbe260500eb9dd8..8cc16a6e1ec199776003204e8d9c870fb0f62114 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -1253,13 +1253,20 @@ async fn handle_actions( Action::BecomeActive => { *mode = Mode::Active; - let messages = distribution_messages_for_activation( + let (messages, next_actions) = distribution_messages_for_activation( + ctx, overlayed_db, state, delayed_approvals_timers, - )?; + session_info_provider, + ) + .await?; ctx.send_messages(messages.into_iter()).await; + let next_actions: Vec = + next_actions.into_iter().map(|v| v.clone()).chain(actions_iter).collect(); + + actions_iter = next_actions.into_iter(); }, Action::Conclude => { conclude = true; @@ -1313,15 +1320,19 @@ fn get_assignment_core_indices( } } -fn distribution_messages_for_activation( +#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] +async fn distribution_messages_for_activation( + ctx: &mut Context, db: &OverlayedBackend<'_, impl Backend>, state: &State, delayed_approvals_timers: &mut DelayedApprovalTimer, -) -> SubsystemResult> { + session_info_provider: &mut RuntimeInfo, +) -> SubsystemResult<(Vec, Vec)> { let all_blocks: Vec = db.load_all_blocks()?; let mut approval_meta = Vec::with_capacity(all_blocks.len()); let mut messages = Vec::new(); + let mut actions = Vec::new(); messages.push(ApprovalDistributionMessage::NewBlocks(Vec::new())); // dummy value. @@ -1396,16 +1407,60 @@ fn distribution_messages_for_activation( &claimed_core_indices, &block_entry, ) { - Ok(bitfield) => messages.push( - ApprovalDistributionMessage::DistributeAssignment( - IndirectAssignmentCertV2 { - block_hash, - validator: assignment.validator_index(), - cert: assignment.cert().clone(), - }, - bitfield, - ), - ), + Ok(bitfield) => { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?candidate_entry.candidate_receipt().hash(), + ?block_hash, + "Discovered, triggered assignment, not approved yet", + ); + + let indirect_cert = IndirectAssignmentCertV2 { + block_hash, + validator: assignment.validator_index(), + cert: assignment.cert().clone(), + }; + messages.push( + ApprovalDistributionMessage::DistributeAssignment( + indirect_cert.clone(), + bitfield.clone(), + ), + ); + + if !block_entry + .candidate_is_pending_signature(*candidate_hash) + { + let ExtendedSessionInfo { ref executor_params, .. 
} = + match get_extended_session_info( + session_info_provider, + ctx.sender(), + block_entry.block_hash(), + block_entry.session(), + ) + .await + { + Some(i) => i, + None => continue, + }; + + actions.push(Action::LaunchApproval { + claimed_candidate_indices: bitfield, + candidate_hash: candidate_entry + .candidate_receipt() + .hash(), + indirect_cert, + assignment_tranche: assignment.tranche(), + relay_block_hash: block_hash, + session: block_entry.session(), + executor_params: executor_params.clone(), + candidate: candidate_entry + .candidate_receipt() + .clone(), + backing_group: approval_entry.backing_group(), + distribute_assignment: false, + }); + } + }, Err(err) => { // Should never happen. If we fail here it means the // assignment is null (no cores claimed). @@ -1496,7 +1551,7 @@ fn distribution_messages_for_activation( } messages[0] = ApprovalDistributionMessage::NewBlocks(approval_meta); - Ok(messages) + Ok((messages, actions)) } // Handle an incoming signal from the overseer. Returns true if execution should conclude. diff --git a/polkadot/node/core/approval-voting/src/persisted_entries.rs b/polkadot/node/core/approval-voting/src/persisted_entries.rs index ef47bdb2213a153dc7223c1018ba8ec9b341a5aa..b924a1b52ccf49a242adecfd534b498004ec3d87 100644 --- a/polkadot/node/core/approval-voting/src/persisted_entries.rs +++ b/polkadot/node/core/approval-voting/src/persisted_entries.rs @@ -588,6 +588,13 @@ impl BlockEntry { !self.candidates_pending_signature.is_empty() } + /// Returns true if candidate hash is in the queue for a signature. + pub fn candidate_is_pending_signature(&self, candidate_hash: CandidateHash) -> bool { + self.candidates_pending_signature + .values() + .any(|context| context.candidate_hash == candidate_hash) + } + /// Candidate hashes for candidates pending signatures fn candidate_hashes_pending_signature(&self) -> Vec { self.candidates_pending_signature diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs index 9220e84a2554a48c14468567e87e74319ce94151..c9053232a4c8752f03134047dbc64b020377740a 100644 --- a/polkadot/node/core/approval-voting/src/tests.rs +++ b/polkadot/node/core/approval-voting/src/tests.rs @@ -78,6 +78,7 @@ struct TestSyncOracle { struct TestSyncOracleHandle { done_syncing_receiver: oneshot::Receiver<()>, + is_major_syncing: Arc, } impl TestSyncOracleHandle { @@ -108,8 +109,9 @@ impl SyncOracle for TestSyncOracle { fn make_sync_oracle(val: bool) -> (Box, TestSyncOracleHandle) { let (tx, rx) = oneshot::channel(); let flag = Arc::new(AtomicBool::new(val)); - let oracle = TestSyncOracle { flag, done_syncing_sender: Arc::new(Mutex::new(Some(tx))) }; - let handle = TestSyncOracleHandle { done_syncing_receiver: rx }; + let oracle = + TestSyncOracle { flag: flag.clone(), done_syncing_sender: Arc::new(Mutex::new(Some(tx))) }; + let handle = TestSyncOracleHandle { done_syncing_receiver: rx, is_major_syncing: flag }; (Box::new(oracle), handle) } @@ -465,6 +467,7 @@ struct HarnessConfigBuilder { clock: Option, backend: Option, assignment_criteria: Option>, + major_syncing: bool, } impl HarnessConfigBuilder { @@ -476,9 +479,19 @@ impl HarnessConfigBuilder { self } + pub fn major_syncing(&mut self, value: bool) -> &mut Self { + self.major_syncing = value; + self + } + + pub fn backend(&mut self, store: TestStore) -> &mut Self { + self.backend = Some(store); + self + } + pub fn build(&mut self) -> HarnessConfig { let (sync_oracle, sync_oracle_handle) = - self.sync_oracle.take().unwrap_or_else(|| 
make_sync_oracle(false)); + self.sync_oracle.take().unwrap_or_else(|| make_sync_oracle(self.major_syncing)); let assignment_criteria = self .assignment_criteria @@ -736,11 +749,13 @@ struct BlockConfig { slot: Slot, candidates: Option>, session_info: Option, + end_syncing: bool, } struct ChainBuilder { blocks_by_hash: HashMap, blocks_at_height: BTreeMap>, + is_major_syncing: Arc, } impl ChainBuilder { @@ -748,16 +763,28 @@ impl ChainBuilder { const GENESIS_PARENT_HASH: Hash = Hash::repeat_byte(0x00); pub fn new() -> Self { - let mut builder = - Self { blocks_by_hash: HashMap::new(), blocks_at_height: BTreeMap::new() }; + let mut builder = Self { + blocks_by_hash: HashMap::new(), + blocks_at_height: BTreeMap::new(), + is_major_syncing: Arc::new(AtomicBool::new(false)), + }; builder.add_block_inner( Self::GENESIS_HASH, Self::GENESIS_PARENT_HASH, 0, - BlockConfig { slot: Slot::from(0), candidates: None, session_info: None }, + BlockConfig { + slot: Slot::from(0), + candidates: None, + session_info: None, + end_syncing: false, + }, ); builder } + pub fn major_syncing(&mut self, major_syncing: Arc) -> &mut Self { + self.is_major_syncing = major_syncing; + self + } pub fn add_block( &mut self, @@ -808,8 +835,16 @@ impl ChainBuilder { } ancestry.reverse(); - import_block(overseer, ancestry.as_ref(), *number, block_config, false, i > 0) - .await; + import_block( + overseer, + ancestry.as_ref(), + *number, + block_config, + false, + i > 0, + self.is_major_syncing.clone(), + ) + .await; let _: Option<()> = future::pending().timeout(Duration::from_millis(100)).await; } } @@ -863,6 +898,7 @@ async fn import_block( config: &BlockConfig, gap: bool, fork: bool, + major_syncing: Arc, ) { let (new_head, new_header) = &hashes[hashes.len() - 1]; let candidates = config.candidates.clone().unwrap_or(vec![( @@ -891,6 +927,12 @@ async fn import_block( h_tx.send(Ok(Some(new_header.clone()))).unwrap(); } ); + + let is_major_syncing = major_syncing.load(Ordering::SeqCst); + if config.end_syncing { + major_syncing.store(false, Ordering::SeqCst); + } + if !fork { let mut _ancestry_step = 0; if gap { @@ -931,7 +973,7 @@ async fn import_block( } } - if number > 0 { + if number > 0 && !is_major_syncing { assert_matches!( overseer_recv(overseer).await, AllMessages::RuntimeApi( @@ -944,7 +986,6 @@ async fn import_block( c_tx.send(Ok(inclusion_events)).unwrap(); } ); - assert_matches!( overseer_recv(overseer).await, AllMessages::RuntimeApi( @@ -984,14 +1025,14 @@ async fn import_block( ); } - if number == 0 { + if number == 0 && !is_major_syncing { assert_matches!( overseer_recv(overseer).await, AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NewBlocks(v)) => { assert_eq!(v.len(), 0usize); } ); - } else { + } else if number > 0 && !is_major_syncing { if !fork { // SessionInfo won't be called for forks - it's already cached assert_matches!( @@ -1031,20 +1072,23 @@ async fn import_block( ); } - assert_matches!( - overseer_recv(overseer).await, - AllMessages::ApprovalDistribution( - ApprovalDistributionMessage::NewBlocks(mut approval_vec) - ) => { - assert_eq!(approval_vec.len(), 1); - let metadata = approval_vec.pop().unwrap(); - let hash = &hashes[number as usize]; - let parent_hash = &hashes[(number - 1) as usize]; - assert_eq!(metadata.hash, hash.0.clone()); - assert_eq!(metadata.parent_hash, parent_hash.0.clone()); - assert_eq!(metadata.slot, config.slot); - } - ); + if !is_major_syncing { + assert_matches!( + overseer_recv(overseer).await, + + AllMessages::ApprovalDistribution( + 
ApprovalDistributionMessage::NewBlocks(mut approval_vec) + ) => { + assert_eq!(approval_vec.len(), 1); + let metadata = approval_vec.pop().unwrap(); + let hash = &hashes[number as usize]; + let parent_hash = &hashes[(number - 1) as usize]; + assert_eq!(metadata.hash, hash.0.clone()); + assert_eq!(metadata.parent_hash, parent_hash.0.clone()); + assert_eq!(metadata.slot, config.slot); + } + ); + } } } @@ -1072,7 +1116,7 @@ fn subsystem_rejects_bad_assignment_ok_criteria() { block_hash, head, 1, - BlockConfig { slot, candidates: None, session_info: None }, + BlockConfig { slot, candidates: None, session_info: None, end_syncing: false }, ); builder.build(&mut virtual_overseer).await; @@ -1135,7 +1179,7 @@ fn subsystem_rejects_bad_assignment_err_criteria() { block_hash, head, 1, - BlockConfig { slot, candidates: None, session_info: None }, + BlockConfig { slot, candidates: None, session_info: None, end_syncing: false }, ); builder.build(&mut virtual_overseer).await; @@ -1240,6 +1284,7 @@ fn subsystem_rejects_approval_if_no_candidate_entry() { slot, candidates: Some(vec![(candidate_descriptor, CoreIndex(1), GroupIndex(1))]), session_info: None, + end_syncing: false, }, ); builder.build(&mut virtual_overseer).await; @@ -1345,7 +1390,12 @@ fn subsystem_rejects_approval_before_assignment() { block_hash, ChainBuilder::GENESIS_HASH, 1, - BlockConfig { slot: Slot::from(1), candidates: None, session_info: None }, + BlockConfig { + slot: Slot::from(1), + candidates: None, + session_info: None, + end_syncing: false, + }, ) .build(&mut virtual_overseer) .await; @@ -1398,7 +1448,12 @@ fn subsystem_rejects_assignment_in_future() { block_hash, ChainBuilder::GENESIS_HASH, 1, - BlockConfig { slot: Slot::from(0), candidates: None, session_info: None }, + BlockConfig { + slot: Slot::from(0), + candidates: None, + session_info: None, + end_syncing: false, + }, ) .build(&mut virtual_overseer) .await; @@ -1472,6 +1527,7 @@ fn subsystem_accepts_duplicate_assignment() { (candidate_receipt2, CoreIndex(1), GroupIndex(1)), ]), session_info: None, + end_syncing: false, }, ) .build(&mut virtual_overseer) @@ -1537,7 +1593,12 @@ fn subsystem_rejects_assignment_with_unknown_candidate() { block_hash, ChainBuilder::GENESIS_HASH, 1, - BlockConfig { slot: Slot::from(1), candidates: None, session_info: None }, + BlockConfig { + slot: Slot::from(1), + candidates: None, + session_info: None, + end_syncing: false, + }, ) .build(&mut virtual_overseer) .await; @@ -1582,7 +1643,12 @@ fn subsystem_rejects_oversized_bitfields() { block_hash, ChainBuilder::GENESIS_HASH, 1, - BlockConfig { slot: Slot::from(1), candidates: None, session_info: None }, + BlockConfig { + slot: Slot::from(1), + candidates: None, + session_info: None, + end_syncing: false, + }, ) .build(&mut virtual_overseer) .await; @@ -1650,7 +1716,12 @@ fn subsystem_accepts_and_imports_approval_after_assignment() { block_hash, ChainBuilder::GENESIS_HASH, 1, - BlockConfig { slot: Slot::from(1), candidates: None, session_info: None }, + BlockConfig { + slot: Slot::from(1), + candidates: None, + session_info: None, + end_syncing: false, + }, ) .build(&mut virtual_overseer) .await; @@ -1741,6 +1812,7 @@ fn subsystem_second_approval_import_only_schedules_wakeups() { slot: Slot::from(0), candidates: None, session_info: Some(session_info), + end_syncing: false, }, ) .build(&mut virtual_overseer) @@ -1824,7 +1896,12 @@ fn subsystem_assignment_import_updates_candidate_entry_and_schedules_wakeup() { block_hash, ChainBuilder::GENESIS_HASH, 1, - BlockConfig { slot: Slot::from(1), 
candidates: None, session_info: None }, + BlockConfig { + slot: Slot::from(1), + candidates: None, + session_info: None, + end_syncing: false, + }, ) .build(&mut virtual_overseer) .await; @@ -1873,7 +1950,12 @@ fn subsystem_process_wakeup_schedules_wakeup() { block_hash, ChainBuilder::GENESIS_HASH, 1, - BlockConfig { slot: Slot::from(1), candidates: None, session_info: None }, + BlockConfig { + slot: Slot::from(1), + candidates: None, + session_info: None, + end_syncing: false, + }, ) .build(&mut virtual_overseer) .await; @@ -1925,7 +2007,7 @@ fn linear_import_act_on_leaf() { hash, head, i, - BlockConfig { slot, candidates: None, session_info: None }, + BlockConfig { slot, candidates: None, session_info: None, end_syncing: false }, ); head = hash; } @@ -1983,7 +2065,7 @@ fn forkful_import_at_same_height_act_on_leaf() { hash, head, i, - BlockConfig { slot, candidates: None, session_info: None }, + BlockConfig { slot, candidates: None, session_info: None, end_syncing: false }, ); head = hash; } @@ -1997,7 +2079,7 @@ fn forkful_import_at_same_height_act_on_leaf() { hash, head, session, - BlockConfig { slot, candidates: None, session_info: None }, + BlockConfig { slot, candidates: None, session_info: None, end_syncing: false }, ); } builder.build(&mut virtual_overseer).await; @@ -2168,6 +2250,7 @@ fn import_checked_approval_updates_entries_and_schedules() { slot, candidates: Some(vec![(candidate_descriptor, CoreIndex(0), GroupIndex(0))]), session_info: Some(session_info), + end_syncing: false, }, ); builder.build(&mut virtual_overseer).await; @@ -2323,6 +2406,7 @@ fn subsystem_import_checked_approval_sets_one_block_bit_at_a_time() { (candidate_receipt2, CoreIndex(1), GroupIndex(1)), ]), session_info: Some(session_info), + end_syncing: false, }, ) .build(&mut virtual_overseer) @@ -2445,6 +2529,7 @@ fn approved_ancestor_test( slot: Slot::from(i as u64), candidates: Some(vec![(candidate_receipt, CoreIndex(0), GroupIndex(0))]), session_info: None, + end_syncing: false, }, ); } @@ -2623,13 +2708,19 @@ fn subsystem_validate_approvals_cache() { slot, candidates: candidates.clone(), session_info: Some(session_info.clone()), + end_syncing: false, }, ) .add_block( fork_block_hash, ChainBuilder::GENESIS_HASH, 1, - BlockConfig { slot, candidates, session_info: Some(session_info) }, + BlockConfig { + slot, + candidates, + session_info: Some(session_info), + end_syncing: false, + }, ) .build(&mut virtual_overseer) .await; @@ -2740,6 +2831,7 @@ fn subsystem_doesnt_distribute_duplicate_compact_assignments() { (candidate_receipt2.clone(), CoreIndex(1), GroupIndex(1)), ]), session_info: None, + end_syncing: false, }, ) .build(&mut virtual_overseer) @@ -2997,6 +3089,7 @@ where slot, candidates: Some(vec![(candidate_receipt, CoreIndex(0), GroupIndex(2))]), session_info: Some(session_info), + end_syncing: false, }, ) .build(&mut virtual_overseer) @@ -3314,6 +3407,7 @@ fn pre_covers_dont_stall_approval() { slot, candidates: Some(vec![(candidate_descriptor, CoreIndex(0), GroupIndex(0))]), session_info: Some(session_info), + end_syncing: false, }, ); builder.build(&mut virtual_overseer).await; @@ -3491,6 +3585,7 @@ fn waits_until_approving_assignments_are_old_enough() { slot, candidates: Some(vec![(candidate_descriptor, CoreIndex(0), GroupIndex(0))]), session_info: Some(session_info), + end_syncing: false, }, ); builder.build(&mut virtual_overseer).await; @@ -3705,6 +3800,7 @@ fn test_approval_is_sent_on_max_approval_coalesce_count() { slot, candidates: candidates.clone(), session_info: Some(session_info.clone()), + 
end_syncing: false, }, ) .build(&mut virtual_overseer) @@ -3943,8 +4039,7 @@ fn test_approval_is_sent_on_max_approval_coalesce_wait() { let store = config.backend(); test_harness(config, |test_harness| async move { - let TestHarness { mut virtual_overseer, clock, sync_oracle_handle: _sync_oracle_handle } = - test_harness; + let TestHarness { mut virtual_overseer, clock, sync_oracle_handle: _ } = test_harness; assert_matches!( overseer_recv(&mut virtual_overseer).await, @@ -4006,6 +4101,7 @@ fn test_approval_is_sent_on_max_approval_coalesce_wait() { slot, candidates: candidates.clone(), session_info: Some(session_info.clone()), + end_syncing: false, }, ) .build(&mut virtual_overseer) @@ -4040,3 +4136,569 @@ fn test_approval_is_sent_on_max_approval_coalesce_wait() { virtual_overseer }); } + +// Builds a chain with a fork where both relay blocks include the same candidate. +async fn build_chain_with_two_blocks_with_one_candidate_each( + block_hash1: Hash, + block_hash2: Hash, + slot: Slot, + sync_oracle_handle: TestSyncOracleHandle, + candidate_receipt: CandidateReceipt, +) -> (ChainBuilder, SessionInfo) { + let validators = vec![ + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::Eve, + ]; + let session_info = SessionInfo { + validator_groups: IndexedVec::>::from(vec![ + vec![ValidatorIndex(0), ValidatorIndex(1)], + vec![ValidatorIndex(2)], + vec![ValidatorIndex(3), ValidatorIndex(4)], + ]), + ..session_info(&validators) + }; + + let candidates = Some(vec![(candidate_receipt.clone(), CoreIndex(0), GroupIndex(0))]); + let mut chain_builder = ChainBuilder::new(); + + chain_builder + .major_syncing(sync_oracle_handle.is_major_syncing.clone()) + .add_block( + block_hash1, + ChainBuilder::GENESIS_HASH, + 1, + BlockConfig { + slot, + candidates: candidates.clone(), + session_info: Some(session_info.clone()), + end_syncing: false, + }, + ) + .add_block( + block_hash2, + ChainBuilder::GENESIS_HASH, + 1, + BlockConfig { + slot, + candidates, + session_info: Some(session_info.clone()), + end_syncing: true, + }, + ); + (chain_builder, session_info) +} + +async fn setup_overseer_with_two_blocks_each_with_one_assignment_triggered( + virtual_overseer: &mut VirtualOverseer, + store: TestStore, + clock: &Box, + sync_oracle_handle: TestSyncOracleHandle, +) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + + let block_hash = Hash::repeat_byte(0x01); + let fork_block_hash = Hash::repeat_byte(0x02); + let candidate_commitments = CandidateCommitments::default(); + let mut candidate_receipt = dummy_candidate_receipt(block_hash); + candidate_receipt.commitments_hash = candidate_commitments.hash(); + let candidate_hash = candidate_receipt.hash(); + let slot = Slot::from(1); + let (chain_builder, _session_info) = build_chain_with_two_blocks_with_one_candidate_each( + block_hash, + fork_block_hash, + slot, + sync_oracle_handle, + candidate_receipt, + ) + .await; + chain_builder.build(virtual_overseer).await; + + assert!(!clock.inner.lock().current_wakeup_is(1)); + clock.inner.lock().wakeup_all(1); + + assert!(clock.inner.lock().current_wakeup_is(slot_to_tick(slot))); + clock.inner.lock().wakeup_all(slot_to_tick(slot)); + + futures_timer::Delay::new(Duration::from_millis(200)).await; + + clock.inner.lock().wakeup_all(slot_to_tick(slot + 2)); + + assert_eq!(clock.inner.lock().wakeups.len(), 0); + + 
futures_timer::Delay::new(Duration::from_millis(200)).await; + + let candidate_entry = store.load_candidate_entry(&candidate_hash).unwrap().unwrap(); + let our_assignment = + candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap(); + assert!(our_assignment.triggered()); +} + +// Tests that for candidates that we did not approve yet, for which we triggered the assignment and +// the approval work we restart the work to approve it. +#[test] +fn subsystem_relaunches_approval_work_on_restart() { + let assignment_criteria = Box::new(MockAssignmentCriteria( + || { + let mut assignments = HashMap::new(); + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }) + .into(), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFModuloCompact { + core_bitfield: vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)] + .try_into() + .unwrap(), + }), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + assignments + }, + |_| Ok(0), + )); + let config = HarnessConfigBuilder::default().assignment_criteria(assignment_criteria).build(); + let store = config.backend(); + let store_clone = config.backend(); + + test_harness(config, |test_harness| async move { + let TestHarness { mut virtual_overseer, clock, sync_oracle_handle } = test_harness; + + setup_overseer_with_two_blocks_each_with_one_assignment_triggered( + &mut virtual_overseer, + store, + &clock, + sync_oracle_handle, + ) + .await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _ + )) => { + } + ); + + // Bail early after the assignment has been distributed but before we answer with the mocked + // approval from CandidateValidation. + virtual_overseer + }); + + // Restart a new approval voting subsystem with the same database and major syncing true untill + // the last leaf. 
+ let config = HarnessConfigBuilder::default().backend(store_clone).major_syncing(true).build(); + + test_harness(config, |test_harness| async move { + let TestHarness { mut virtual_overseer, clock, sync_oracle_handle } = test_harness; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + + let block_hash = Hash::repeat_byte(0x01); + let fork_block_hash = Hash::repeat_byte(0x02); + let candidate_commitments = CandidateCommitments::default(); + let mut candidate_receipt = dummy_candidate_receipt(block_hash); + candidate_receipt.commitments_hash = candidate_commitments.hash(); + let slot = Slot::from(1); + clock.inner.lock().set_tick(slot_to_tick(slot + 2)); + let (chain_builder, session_info) = build_chain_with_two_blocks_with_one_candidate_each( + block_hash, + fork_block_hash, + slot, + sync_oracle_handle, + candidate_receipt, + ) + .await; + + chain_builder.build(&mut virtual_overseer).await; + + futures_timer::Delay::new(Duration::from_millis(2000)).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionInfo(_, si_tx), + ) + ) => { + si_tx.send(Ok(Some(session_info.clone()))).unwrap(); + } + ); + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionExecutorParams(_, si_tx), + ) + ) => { + // Make sure all SessionExecutorParams calls are not made for the leaf (but for its relay parent) + si_tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), ) + ) => { + si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + } + ); + + // On major syncing ending Approval voting should send all the necessary messages for a + // candidate to be approved. + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NewBlocks( + _, + )) => { + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + // Guarantees the approval work has been relaunched. + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive { + exec_kind, + response_sender, + .. 
+ }) if exec_kind == PvfExecKind::Approval => { + response_sender.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))) + .unwrap(); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 1, + })); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_)) + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 1, + })); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_)) + ); + + // Assert that there are no more messages being sent by the subsystem + assert!(overseer_recv(&mut virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); + + virtual_overseer + }); +} + +// Test that cached approvals, which are candidates that we approved but we didn't issue +// the signature yet because we want to coalesce it with more candidate are sent after restart. +#[test] +fn subsystem_sends_pending_approvals_on_approval_restart() { + let assignment_criteria = Box::new(MockAssignmentCriteria( + || { + let mut assignments = HashMap::new(); + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }) + .into(), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFModuloCompact { + core_bitfield: vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)] + .try_into() + .unwrap(), + }), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + assignments + }, + |_| Ok(0), + )); + let config = HarnessConfigBuilder::default().assignment_criteria(assignment_criteria).build(); + let store = config.backend(); + let store_clone = config.backend(); + + test_harness(config, |test_harness| async move { + let TestHarness { mut virtual_overseer, clock, sync_oracle_handle } = test_harness; + + setup_overseer_with_two_blocks_each_with_one_assignment_triggered( + &mut virtual_overseer, + store, + &clock, + sync_oracle_handle, + ) + .await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _ + )) => { + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive { + exec_kind, + response_sender, + .. 
+ }) if exec_kind == PvfExecKind::Approval => { + response_sender.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))) + .unwrap(); + } + ); + + // Configure a big coalesce number, so that the signature is cached instead of being sent to + // approval-distribution. + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 6, + })); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 6, + })); + } + ); + + // Assert that there are no more messages being sent by the subsystem + assert!(overseer_recv(&mut virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); + + virtual_overseer + }); + + let config = HarnessConfigBuilder::default().backend(store_clone).major_syncing(true).build(); + // On restart signatures should be sent to approval-distribution without relaunching the + // approval work. + test_harness(config, |test_harness| async move { + let TestHarness { mut virtual_overseer, clock, sync_oracle_handle } = test_harness; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + + let block_hash = Hash::repeat_byte(0x01); + let fork_block_hash = Hash::repeat_byte(0x02); + let candidate_commitments = CandidateCommitments::default(); + let mut candidate_receipt = dummy_candidate_receipt(block_hash); + candidate_receipt.commitments_hash = candidate_commitments.hash(); + let slot = Slot::from(1); + + clock.inner.lock().set_tick(slot_to_tick(slot + 2)); + let (chain_builder, session_info) = build_chain_with_two_blocks_with_one_candidate_each( + block_hash, + fork_block_hash, + slot, + sync_oracle_handle, + candidate_receipt, + ) + .await; + chain_builder.build(&mut virtual_overseer).await; + + futures_timer::Delay::new(Duration::from_millis(2000)).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NewBlocks( + _, + )) => { + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 1, + })); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionInfo(_, si_tx), + ) + ) => { + si_tx.send(Ok(Some(session_info.clone()))).unwrap(); + } + ); + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionExecutorParams(_, si_tx), + ) + ) => { + // Make sure all SessionExecutorParams calls are not made 
for the leaf (but for its relay parent) + si_tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), ) + ) => { + si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_)) + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 1, + })); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_)) + ); + + // Assert that there are no more messages being sent by the subsystem + assert!(overseer_recv(&mut virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); + + virtual_overseer + }); +} diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index bf6e09fd1b69b702206eb52090cb4a12812c00c0..8237137fdca015ace87248d062431230a2ad47d4 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -695,6 +695,8 @@ async fn validate_candidate_exhaustive( ))), Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError(err))) => Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(err))), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::RuntimeConstruction(err))) => + Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(err))), Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousJobDeath(err))) => Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(format!( @@ -780,40 +782,50 @@ trait ValidationBackend { return validation_result } + macro_rules! break_if_no_retries_left { + ($counter:ident) => { + if $counter > 0 { + $counter -= 1; + } else { + break + } + }; + } + // Allow limited retries for each kind of error. let mut num_death_retries_left = 1; let mut num_job_error_retries_left = 1; let mut num_internal_retries_left = 1; + let mut num_runtime_construction_retries_left = 1; loop { // Stop retrying if we exceeded the timeout. 
if total_time_start.elapsed() + retry_delay > exec_timeout { break } - + let mut retry_immediately = false; match validation_result { Err(ValidationError::PossiblyInvalid( PossiblyInvalidError::AmbiguousWorkerDeath | PossiblyInvalidError::AmbiguousJobDeath(_), - )) => - if num_death_retries_left > 0 { - num_death_retries_left -= 1; - } else { - break - }, + )) => break_if_no_retries_left!(num_death_retries_left), Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError(_))) => - if num_job_error_retries_left > 0 { - num_job_error_retries_left -= 1; - } else { - break - }, + break_if_no_retries_left!(num_job_error_retries_left), Err(ValidationError::Internal(_)) => - if num_internal_retries_left > 0 { - num_internal_retries_left -= 1; - } else { - break - }, + break_if_no_retries_left!(num_internal_retries_left), + + Err(ValidationError::PossiblyInvalid( + PossiblyInvalidError::RuntimeConstruction(_), + )) => { + break_if_no_retries_left!(num_runtime_construction_retries_left); + self.precheck_pvf(pvf.clone()).await?; + // In this case the error is deterministic + // And a retry forces the ValidationBackend + // to re-prepare the artifact so + // there is no need to wait before the retry + retry_immediately = true; + }, Ok(_) | Err(ValidationError::Invalid(_) | ValidationError::Preparation(_)) => break, } @@ -821,8 +833,11 @@ trait ValidationBackend { // If we got a possibly transient error, retry once after a brief delay, on the // assumption that the conditions that caused this error may have resolved on their own. { - // Wait a brief delay before retrying. - futures_timer::Delay::new(retry_delay).await; + // In case of many transient errors it is necessary to wait a little bit + // for the error to be probably resolved + if !retry_immediately { + futures_timer::Delay::new(retry_delay).await; + } let new_timeout = exec_timeout.saturating_sub(total_time_start.elapsed()); diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml index 5b62d90c1d4f82c885a650cfb63801358e57277d..f66a66e859ec0a139e31068f78225597d577d1a9 100644 --- a/polkadot/node/core/prospective-parachains/Cargo.toml +++ b/polkadot/node/core/prospective-parachains/Cargo.toml @@ -23,6 +23,7 @@ polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } [dev-dependencies] +rstest = "0.18.2" assert_matches = "1" polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } polkadot-node-subsystem-types = { path = "../../subsystem-types" } diff --git a/polkadot/node/core/prospective-parachains/src/fragment_tree.rs b/polkadot/node/core/prospective-parachains/src/fragment_tree.rs index 04ee42a9de062bbd183f92c360703eb12c25f1aa..8061dc82d8358ae9e7c741e3f785634601eb5d03 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_tree.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_tree.rs @@ -96,6 +96,7 @@ use std::{ use super::LOG_TARGET; use bitvec::prelude::*; +use polkadot_node_subsystem::messages::Ancestors; use polkadot_node_subsystem_util::inclusion_emulator::{ ConstraintModifications, Constraints, Fragment, ProspectiveCandidate, RelayChainBlockInfo, }; @@ -756,45 +757,46 @@ impl FragmentTree { depths.iter_ones().collect() } - /// Select `count` candidates after the given `required_path` which pass + /// Select `count` candidates after the given `ancestors` which pass /// the predicate and have not already been backed on chain. 
/// - /// Does an exhaustive search into the tree starting after `required_path`. - /// If there are multiple possibilities of size `count`, this will select the first one. - /// If there is no chain of size `count` that matches the criteria, this will return the largest - /// chain it could find with the criteria. - /// If there are no candidates meeting those criteria, returns an empty `Vec`. - /// Cycles are accepted, see module docs for the `Cycles` section. + /// Does an exhaustive search into the tree after traversing the ancestors path. + /// If the ancestors draw out a path that can be traversed in multiple ways, no + /// candidates will be returned. + /// If the ancestors do not draw out a full path (the path contains holes), candidates will be + /// suggested that may fill these holes. + /// If the ancestors don't draw out a valid path, no candidates will be returned. If there are + /// multiple possibilities of the same size, this will select the first one. If there is no + /// chain of size `count` that matches the criteria, this will return the largest chain it could + /// find with the criteria. If there are no candidates meeting those criteria, returns an empty + /// `Vec`. + /// Cycles are accepted, but this code expects that the runtime will deduplicate + /// identical candidates when occupying the cores (when proposing to back A->B->A, only A will + /// be backed on chain). /// - /// The intention of the `required_path` is to allow queries on the basis of + /// The intention of the `ancestors` is to allow queries on the basis of /// one or more candidates which were previously pending availability becoming - /// available and opening up more room on the core. - pub(crate) fn select_children( + /// available or candidates timing out. + pub(crate) fn find_backable_chain( &self, - required_path: &[CandidateHash], + ancestors: Ancestors, count: u32, pred: impl Fn(&CandidateHash) -> bool, ) -> Vec { - let base_node = { - // traverse the required path. - let mut node = NodePointer::Root; - for required_step in required_path { - if let Some(next_node) = self.node_candidate_child(node, &required_step) { - node = next_node; - } else { - return vec![] - }; - } - - node - }; - - // TODO: taking the first best selection might introduce bias - // or become gameable. - // - // For plausibly unique parachains, this shouldn't matter much. - // figure out alternative selection criteria? - self.select_children_inner(base_node, count, count, &pred, &mut vec![]) + if count == 0 { + return vec![] + } + // First, we need to order the ancestors. + // The node returned is the one from which we can start finding new backable candidates. + let Some(base_node) = self.find_ancestor_path(ancestors) else { return vec![] }; + + self.find_backable_chain_inner( + base_node, + count, + count, + &pred, + &mut Vec::with_capacity(count as usize), + ) } // Try finding a candidate chain starting from `base_node` of length `expected_count`. @@ -805,7 +807,7 @@ impl FragmentTree { // Cycles are accepted, but this doesn't allow for infinite execution time, because the maximum // depth we'll reach is `expected_count`. // - // Worst case performance is `O(num_forks ^ expected_count)`. + // Worst case performance is `O(num_forks ^ expected_count)`, the same as populating the tree. // Although an exponential function, this is actually a constant that can only be altered via // sudo/governance, because: // 1. 
`num_forks` at a given level is at most `max_candidate_depth * max_validators_per_core` @@ -817,7 +819,7 @@ impl FragmentTree { // scaling scenario). For non-elastic-scaling, this is just 1. In practice, this should be a // small number (1-3), capped by the total number of available cores (a constant alterable // only via governance/sudo). - fn select_children_inner( + fn find_backable_chain_inner( &self, base_node: NodePointer, expected_count: u32, @@ -857,7 +859,7 @@ impl FragmentTree { for (child_ptr, child_hash) in children { accumulator.push(child_hash); - let result = self.select_children_inner( + let result = self.find_backable_chain_inner( child_ptr, expected_count, remaining_count - 1, @@ -869,6 +871,9 @@ impl FragmentTree { // Short-circuit the search if we've found the right length. Otherwise, we'll // search for a max. + // Taking the first best selection doesn't introduce bias or become gameable, + // because `find_ancestor_path` uses a `HashSet` to track the ancestors, which + // makes the order in which ancestors are visited non-deterministic. if result.len() == expected_count as usize { return result } else if best_result.len() < result.len() { @@ -879,6 +884,93 @@ impl FragmentTree { best_result } + // Orders the ancestors into a viable path from root to the last one. + // Returns a pointer to the last node in the path. + // We assume that the ancestors form a chain (that the + // av-cores do not back parachain forks), None is returned otherwise. + // If we cannot use all ancestors, stop at the first found hole in the chain. This usually + // translates to a timed out candidate. + fn find_ancestor_path(&self, mut ancestors: Ancestors) -> Option { + // The number of elements in the path we've processed so far. + let mut depth = 0; + let mut last_node = NodePointer::Root; + let mut next_node: Option = Some(NodePointer::Root); + + while let Some(node) = next_node { + if depth > self.scope.max_depth { + return None; + } + + last_node = node; + + next_node = match node { + NodePointer::Root => { + let children = self + .nodes + .iter() + .enumerate() + .take_while(|n| n.1.parent == NodePointer::Root) + .map(|(index, node)| (NodePointer::Storage(index), node.candidate_hash)) + .collect::>(); + + self.find_valid_child(&mut ancestors, children.iter()).ok()? + }, + NodePointer::Storage(ptr) => { + let children = self.nodes.get(ptr).and_then(|n| Some(n.children.iter())); + if let Some(children) = children { + self.find_valid_child(&mut ancestors, children).ok()? + } else { + None + } + }, + }; + + depth += 1; + } + + Some(last_node) + } + + // Find a node from the given iterator which is present in the ancestors + // collection. If there are multiple such nodes, return an error and log a warning. We don't + // accept forks in a parachain to be backed. The supplied ancestors should all form a chain. + // If there is no such node, return None. + fn find_valid_child<'a>( + &self, + ancestors: &'a mut Ancestors, + nodes: impl Iterator + 'a, + ) -> Result, ()> { + let mut possible_children = + nodes.filter_map(|(node_ptr, hash)| match ancestors.remove(&hash) { + true => Some(node_ptr), + false => None, + }); + + // We don't accept forks in a parachain to be backed. The supplied ancestors + // should all form a chain. 
+ let next = possible_children.next(); + if let Some(second_child) = possible_children.next() { + if let (Some(NodePointer::Storage(first_child)), NodePointer::Storage(second_child)) = + (next, second_child) + { + gum::error!( + target: LOG_TARGET, + para_id = ?self.scope.para, + relay_parent = ?self.scope.relay_parent, + "Trying to find new backable candidates for a parachain for which we've backed a fork.\ + This is a bug and the runtime should not have allowed it.\n\ + Backed candidates with the same parent: {}, {}", + self.nodes[*first_child].candidate_hash, + self.nodes[*second_child].candidate_hash, + ); + } + + Err(()) + } else { + Ok(next.copied()) + } + } + fn populate_from_bases(&mut self, storage: &CandidateStorage, initial_bases: Vec) { // Populate the tree breadth-first. let mut last_sweep_start = None; @@ -1061,8 +1153,18 @@ mod tests { use polkadot_node_subsystem_util::inclusion_emulator::InboundHrmpLimitations; use polkadot_primitives::{BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData}; use polkadot_primitives_test_helpers as test_helpers; + use rstest::rstest; use std::iter; + impl NodePointer { + fn unwrap_idx(self) -> usize { + match self { + NodePointer::Root => panic!("Unexpected root"), + NodePointer::Storage(index) => index, + } + } + } + fn make_constraints( min_relay_parent_number: BlockNumber, valid_watermarks: Vec, @@ -1546,6 +1648,373 @@ mod tests { assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); } + #[test] + fn test_find_ancestor_path_and_find_backable_chain_empty_tree() { + let para_id = ParaId::from(5u32); + let relay_parent = Hash::repeat_byte(1); + let required_parent: HeadData = vec![0xff].into(); + let max_depth = 10; + + // Empty tree + let storage = CandidateStorage::new(); + let base_constraints = make_constraints(0, vec![0], required_parent.clone()); + + let relay_parent_info = + RelayChainBlockInfo { number: 0, hash: relay_parent, storage_root: Hash::zero() }; + + let scope = Scope::with_ancestors( + para_id, + relay_parent_info, + base_constraints, + vec![], + max_depth, + vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + assert_eq!(tree.candidates().collect::>().len(), 0); + assert_eq!(tree.nodes.len(), 0); + + assert_eq!(tree.find_ancestor_path(Ancestors::new()).unwrap(), NodePointer::Root); + assert_eq!(tree.find_backable_chain(Ancestors::new(), 2, |_| true), vec![]); + // Invalid candidate. + let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); + assert_eq!(tree.find_ancestor_path(ancestors.clone()), Some(NodePointer::Root)); + assert_eq!(tree.find_backable_chain(ancestors, 2, |_| true), vec![]); + } + + #[rstest] + #[case(true, 13)] + #[case(false, 8)] + // The tree with no cycles looks like: + // Make a tree that looks like this (note that there's no cycle): + // +-(root)-+ + // | | + // +----0---+ 7 + // | | + // 1----+ 5 + // | | + // | | + // 2 6 + // | + // 3 + // | + // 4 + // + // The tree with cycles is the same as the first but has a cycle from 4 back to the state + // produced by 0 (It's bounded by the max_depth + 1). 
+ // +-(root)-+ + // | | + // +----0---+ 7 + // | | + // 1----+ 5 + // | | + // | | + // 2 6 + // | + // 3 + // | + // 4---+ + // | | + // 1 5 + // | + // 2 + // | + // 3 + fn test_find_ancestor_path_and_find_backable_chain( + #[case] has_cycle: bool, + #[case] expected_node_count: usize, + ) { + let para_id = ParaId::from(5u32); + let relay_parent = Hash::repeat_byte(1); + let required_parent: HeadData = vec![0xff].into(); + let max_depth = 7; + let relay_parent_number = 0; + let relay_parent_storage_root = Hash::repeat_byte(69); + + let mut candidates = vec![]; + + // Candidate 0 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + required_parent.clone(), + vec![0].into(), + 0, + )); + // Candidate 1 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![0].into(), + vec![1].into(), + 0, + )); + // Candidate 2 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![1].into(), + vec![2].into(), + 0, + )); + // Candidate 3 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![2].into(), + vec![3].into(), + 0, + )); + // Candidate 4 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![3].into(), + vec![4].into(), + 0, + )); + // Candidate 5 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![0].into(), + vec![5].into(), + 0, + )); + // Candidate 6 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![1].into(), + vec![6].into(), + 0, + )); + // Candidate 7 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + required_parent.clone(), + vec![7].into(), + 0, + )); + + if has_cycle { + candidates[4] = make_committed_candidate( + para_id, + relay_parent, + 0, + vec![3].into(), + vec![0].into(), // put the cycle here back to the output state of 0. + 0, + ); + } + + let base_constraints = make_constraints(0, vec![0], required_parent.clone()); + let mut storage = CandidateStorage::new(); + + let relay_parent_info = RelayChainBlockInfo { + number: relay_parent_number, + hash: relay_parent, + storage_root: relay_parent_storage_root, + }; + + for (pvd, candidate) in candidates.iter() { + storage.add_candidate(candidate.clone(), pvd.clone()).unwrap(); + } + let candidates = + candidates.into_iter().map(|(_pvd, candidate)| candidate).collect::>(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_info, + base_constraints, + vec![], + max_depth, + vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + assert_eq!(tree.candidates().collect::>().len(), candidates.len()); + assert_eq!(tree.nodes.len(), expected_node_count); + + // Do some common tests on both trees. + { + // No ancestors supplied. + assert_eq!(tree.find_ancestor_path(Ancestors::new()).unwrap(), NodePointer::Root); + assert_eq!( + tree.find_backable_chain(Ancestors::new(), 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + // Ancestor which is not part of the tree. Will be ignored. + let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); + assert_eq!(tree.find_ancestor_path(ancestors.clone()).unwrap(), NodePointer::Root); + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + // A chain fork. 
+ let ancestors: Ancestors = + [(candidates[0].hash()), (candidates[7].hash())].into_iter().collect(); + assert_eq!(tree.find_ancestor_path(ancestors.clone()), None); + assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); + + // Ancestors which are part of the tree but don't form a path. Will be ignored. + let ancestors: Ancestors = + [candidates[1].hash(), candidates[2].hash()].into_iter().collect(); + assert_eq!(tree.find_ancestor_path(ancestors.clone()).unwrap(), NodePointer::Root); + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // Valid ancestors. + let ancestors: Ancestors = [candidates[7].hash()].into_iter().collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[7].hash()); + assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); + + let ancestors: Ancestors = + [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[2].hash()); + assert_eq!( + tree.find_backable_chain(ancestors.clone(), 2, |_| true), + [3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // Valid ancestors with candidates which have been omitted due to timeouts + let ancestors: Ancestors = + [candidates[0].hash(), candidates[2].hash()].into_iter().collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[0].hash()); + assert_eq!( + tree.find_backable_chain(ancestors, 3, |_| true), + [1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + let ancestors: Ancestors = + [candidates[0].hash(), candidates[1].hash(), candidates[3].hash()] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[1].hash()); + if has_cycle { + assert_eq!( + tree.find_backable_chain(ancestors, 2, |_| true), + [2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + } else { + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + } + + let ancestors: Ancestors = + [candidates[1].hash(), candidates[2].hash()].into_iter().collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + assert_eq!(res, NodePointer::Root); + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // Requested count is 0. + assert_eq!(tree.find_backable_chain(Ancestors::new(), 0, |_| true), vec![]); + + let ancestors: Ancestors = + [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] + .into_iter() + .collect(); + assert_eq!(tree.find_backable_chain(ancestors, 0, |_| true), vec![]); + + let ancestors: Ancestors = + [candidates[2].hash(), candidates[0].hash()].into_iter().collect(); + assert_eq!(tree.find_backable_chain(ancestors, 0, |_| true), vec![]); + } + + // Now do some tests only on the tree with cycles + if has_cycle { + // Exceeds the maximum tree depth. 0-1-2-3-4-1-2-3-4, when the tree stops at + // 0-1-2-3-4-1-2-3. 
+ let ancestors: Ancestors = [ + candidates[0].hash(), + candidates[1].hash(), + candidates[2].hash(), + candidates[3].hash(), + candidates[4].hash(), + ] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[4].hash()); + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // 0-1-2. + let ancestors: Ancestors = + [candidates[0].hash(), candidates[1].hash(), candidates[2].hash()] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[2].hash()); + assert_eq!( + tree.find_backable_chain(ancestors.clone(), 1, |_| true), + [3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + assert_eq!( + tree.find_backable_chain(ancestors, 5, |_| true), + [3, 4, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // 0-1 + let ancestors: Ancestors = + [candidates[0].hash(), candidates[1].hash()].into_iter().collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[1].hash()); + assert_eq!( + tree.find_backable_chain(ancestors, 6, |_| true), + [2, 3, 4, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>(), + ); + + // For 0-1-2-3-4-5, there's more than 1 way of finding this path in + // the tree. `None` should be returned. The runtime should not have accepted this. + let ancestors: Ancestors = [ + candidates[0].hash(), + candidates[1].hash(), + candidates[2].hash(), + candidates[3].hash(), + candidates[4].hash(), + candidates[5].hash(), + ] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()); + assert_eq!(res, None); + assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); + } + } + #[test] fn graceful_cycle_of_0() { let mut storage = CandidateStorage::new(); @@ -1602,13 +2071,17 @@ mod tests { for count in 1..10 { assert_eq!( - tree.select_children(&[], count, |_| true), + tree.find_backable_chain(Ancestors::new(), count, |_| true), iter::repeat(candidate_a_hash) .take(std::cmp::min(count as usize, max_depth + 1)) .collect::>() ); assert_eq!( - tree.select_children(&[candidate_a_hash], count - 1, |_| true), + tree.find_backable_chain( + [candidate_a_hash].into_iter().collect(), + count - 1, + |_| true + ), iter::repeat(candidate_a_hash) .take(std::cmp::min(count as usize - 1, max_depth)) .collect::>() @@ -1682,22 +2155,22 @@ mod tests { assert_eq!(tree.nodes[3].candidate_hash, candidate_b_hash); assert_eq!(tree.nodes[4].candidate_hash, candidate_a_hash); - assert_eq!(tree.select_children(&[], 1, |_| true), vec![candidate_a_hash],); + assert_eq!(tree.find_backable_chain(Ancestors::new(), 1, |_| true), vec![candidate_a_hash],); assert_eq!( - tree.select_children(&[], 2, |_| true), + tree.find_backable_chain(Ancestors::new(), 2, |_| true), vec![candidate_a_hash, candidate_b_hash], ); assert_eq!( - tree.select_children(&[], 3, |_| true), + tree.find_backable_chain(Ancestors::new(), 3, |_| true), vec![candidate_a_hash, candidate_b_hash, candidate_a_hash], ); assert_eq!( - tree.select_children(&[candidate_a_hash], 2, |_| true), + tree.find_backable_chain([candidate_a_hash].into_iter().collect(), 2, |_| true), vec![candidate_b_hash, candidate_a_hash], 
); assert_eq!( - tree.select_children(&[], 6, |_| true), + tree.find_backable_chain(Ancestors::new(), 6, |_| true), vec![ candidate_a_hash, candidate_b_hash, @@ -1706,10 +2179,17 @@ mod tests { candidate_a_hash ], ); - assert_eq!( - tree.select_children(&[candidate_a_hash, candidate_b_hash], 6, |_| true), - vec![candidate_a_hash, candidate_b_hash, candidate_a_hash,], - ); + + for count in 3..7 { + assert_eq!( + tree.find_backable_chain( + [candidate_a_hash, candidate_b_hash].into_iter().collect(), + count, + |_| true + ), + vec![candidate_a_hash, candidate_b_hash, candidate_a_hash], + ); + } } #[test] diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 5937a1c1fb9fdc04eb2fa70e65f92f19d7b99bff..2b14e09b4fb4f56f5f7c6ffd738e4167804f44ad 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -35,7 +35,7 @@ use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ messages::{ - ChainApiMessage, FragmentTreeMembership, HypotheticalCandidate, + Ancestors, ChainApiMessage, FragmentTreeMembership, HypotheticalCandidate, HypotheticalFrontierRequest, IntroduceCandidateRequest, ProspectiveParachainsMessage, ProspectiveValidationDataRequest, RuntimeApiMessage, RuntimeApiRequest, }, @@ -150,16 +150,9 @@ async fn run_iteration( relay_parent, para, count, - required_path, + ancestors, tx, - ) => answer_get_backable_candidates( - &view, - relay_parent, - para, - count, - required_path, - tx, - ), + ) => answer_get_backable_candidates(&view, relay_parent, para, count, ancestors, tx), ProspectiveParachainsMessage::GetHypotheticalFrontier(request, tx) => answer_hypothetical_frontier_request(&view, request, tx), ProspectiveParachainsMessage::GetTreeMembership(para, candidate, tx) => @@ -565,7 +558,7 @@ fn answer_get_backable_candidates( relay_parent: Hash, para: ParaId, count: u32, - required_path: Vec, + ancestors: Ancestors, tx: oneshot::Sender>, ) { let data = match view.active_leaves.get(&relay_parent) { @@ -614,7 +607,7 @@ fn answer_get_backable_candidates( }; let backable_candidates: Vec<_> = tree - .select_children(&required_path, count, |candidate| storage.is_backed(candidate)) + .find_backable_chain(ancestors.clone(), count, |candidate| storage.is_backed(candidate)) .into_iter() .filter_map(|child_hash| { storage.relay_parent_by_candidate_hash(&child_hash).map_or_else( @@ -635,7 +628,7 @@ fn answer_get_backable_candidates( if backable_candidates.is_empty() { gum::trace!( target: LOG_TARGET, - ?required_path, + ?ancestors, para_id = ?para, %relay_parent, "Could not find any backable candidate", diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs index 732736b101de0ce525c7753af4b60cca7534522b..0beddbf1416a7ec94cb84a6893e9c20cbeaf28ce 100644 --- a/polkadot/node/core/prospective-parachains/src/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/tests.rs @@ -407,7 +407,7 @@ async fn get_backable_candidates( virtual_overseer: &mut VirtualOverseer, leaf: &TestLeaf, para_id: ParaId, - required_path: Vec, + ancestors: Ancestors, count: u32, expected_result: Vec<(CandidateHash, Hash)>, ) { @@ -415,11 +415,7 @@ async fn get_backable_candidates( virtual_overseer .send(overseer::FromOrchestra::Communication { msg: ProspectiveParachainsMessage::GetBackableCandidates( - leaf.hash, - para_id, - count, - required_path, - tx, + leaf.hash, para_id, count, ancestors, tx, ), }) 
.await; @@ -903,7 +899,7 @@ fn check_backable_query_single_candidate() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), 1, vec![], ) @@ -912,12 +908,20 @@ fn check_backable_query_single_candidate() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), + 0, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::new(), 0, vec![], ) .await; - get_backable_candidates(&mut virtual_overseer, &leaf_a, 1.into(), vec![], 0, vec![]).await; // Second candidates. second_candidate(&mut virtual_overseer, candidate_a.clone()).await; @@ -928,7 +932,7 @@ fn check_backable_query_single_candidate() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), 1, vec![], ) @@ -939,12 +943,20 @@ fn check_backable_query_single_candidate() { back_candidate(&mut virtual_overseer, &candidate_b, candidate_hash_b).await; // Should not get any backable candidates for the other para. - get_backable_candidates(&mut virtual_overseer, &leaf_a, 2.into(), vec![], 1, vec![]).await; get_backable_candidates( &mut virtual_overseer, &leaf_a, 2.into(), - vec![candidate_hash_a], + Ancestors::new(), + 1, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 2.into(), + vec![candidate_hash_a].into_iter().collect(), 1, vec![], ) @@ -955,7 +967,7 @@ fn check_backable_query_single_candidate() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![], + Ancestors::new(), 1, vec![(candidate_hash_a, leaf_a.hash)], ) @@ -964,20 +976,20 @@ fn check_backable_query_single_candidate() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), 1, vec![(candidate_hash_b, leaf_a.hash)], ) .await; - // Should not get anything at the wrong path. + // Wrong path get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_b], + vec![candidate_hash_b].into_iter().collect(), 1, - vec![], + vec![(candidate_hash_a, leaf_a.hash)], ) .await; @@ -1075,15 +1087,29 @@ fn check_backable_query_multiple_candidates() { make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_i, 10); // Should not get any backable candidates for the other para. 
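+		// Para 2 has no candidates introduced at all, so an empty ancestor set, an ancestor
+		// set made of para 1 candidates and any requested count all yield an empty response.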
- get_backable_candidates(&mut virtual_overseer, &leaf_a, 2.into(), vec![], 1, vec![]) - .await; - get_backable_candidates(&mut virtual_overseer, &leaf_a, 2.into(), vec![], 5, vec![]) - .await; get_backable_candidates( &mut virtual_overseer, &leaf_a, 2.into(), - vec![candidate_hash_a], + Ancestors::new(), + 1, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 2.into(), + Ancestors::new(), + 5, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 2.into(), + vec![candidate_hash_a].into_iter().collect(), 1, vec![], ) @@ -1097,7 +1123,7 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![], + Ancestors::new(), 1, vec![(candidate_hash_a, leaf_a.hash)], ) @@ -1106,7 +1132,7 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![], + Ancestors::new(), 4, vec![ (candidate_hash_a, leaf_a.hash), @@ -1124,7 +1150,7 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), 1, vec![(candidate_hash_b, leaf_a.hash)], ) @@ -1133,16 +1159,7 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a], - 2, - vec![(candidate_hash_b, leaf_a.hash), (candidate_hash_d, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), 3, vec![ (candidate_hash_b, leaf_a.hash), @@ -1159,7 +1176,7 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), count, vec![ (candidate_hash_c, leaf_a.hash), @@ -1172,26 +1189,30 @@ fn check_backable_query_multiple_candidates() { } } - // required path of 2 + // required path of 2 and higher { get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, candidate_hash_b], + vec![candidate_hash_a, candidate_hash_i, candidate_hash_h, candidate_hash_c] + .into_iter() + .collect(), 1, - vec![(candidate_hash_d, leaf_a.hash)], + vec![(candidate_hash_j, leaf_a.hash)], ) .await; + get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, candidate_hash_c], + vec![candidate_hash_a, candidate_hash_b].into_iter().collect(), 1, - vec![(candidate_hash_h, leaf_a.hash)], + vec![(candidate_hash_d, leaf_a.hash)], ) .await; + // If the requested count exceeds the largest chain, return the longest // chain we can get. for count in 4..10 { @@ -1199,7 +1220,7 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, candidate_hash_c], + vec![candidate_hash_a, candidate_hash_c].into_iter().collect(), count, vec![ (candidate_hash_h, leaf_a.hash), @@ -1213,317 +1234,127 @@ fn check_backable_query_multiple_candidates() { // No more candidates in any chain. 
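+		// Once the ancestry covers an entire chain (a-b-e or a-c-h-i-j here), there is
+		// nothing left to build on, so every requested count returns an empty result.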
{ - let required_paths = vec![ - vec![candidate_hash_a, candidate_hash_b, candidate_hash_e], - vec![ - candidate_hash_a, - candidate_hash_c, - candidate_hash_h, - candidate_hash_i, - candidate_hash_j, - ], - ]; - for path in required_paths { - for count in 1..4 { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - path.clone(), - count, - vec![], - ) - .await; - } + for count in 1..4 { + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_a, candidate_hash_b, candidate_hash_e] + .into_iter() + .collect(), + count, + vec![], + ) + .await; + + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![ + candidate_hash_a, + candidate_hash_c, + candidate_hash_h, + candidate_hash_i, + candidate_hash_j, + ] + .into_iter() + .collect(), + count, + vec![], + ) + .await; } } - // Should not get anything at the wrong path. + // Wrong paths. get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_b], + vec![candidate_hash_b].into_iter().collect(), 1, - vec![], + vec![(candidate_hash_a, leaf_a.hash)], ) .await; get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_b, candidate_hash_a], + vec![candidate_hash_b, candidate_hash_f].into_iter().collect(), 3, - vec![], + vec![ + (candidate_hash_a, leaf_a.hash), + (candidate_hash_b, leaf_a.hash), + (candidate_hash_d, leaf_a.hash), + ], ) .await; get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, candidate_hash_b, candidate_hash_c], - 3, - vec![], + vec![candidate_hash_a, candidate_hash_h].into_iter().collect(), + 4, + vec![ + (candidate_hash_c, leaf_a.hash), + (candidate_hash_h, leaf_a.hash), + (candidate_hash_i, leaf_a.hash), + (candidate_hash_j, leaf_a.hash), + ], ) .await; - - virtual_overseer - }); - - assert_eq!(view.active_leaves.len(), 1); - assert_eq!(view.candidate_storage.len(), 2); - // 10 candidates and 7 parents on para 1. - assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (7, 10)); - assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); - } - - // A tree with multiple roots. - // Parachain 1 looks like this: - // (imaginary root) - // | | - // +----B---+ A - // | | | | - // | | | C - // D E F | - // | H - // G | - // I - // | - // J - { - let test_state = TestState::default(); - let view = test_harness(|mut virtual_overseer| async move { - // Leaf A - let leaf_a = TestLeaf { - number: 100, - hash: Hash::from_low_u64_be(130), - para_data: vec![ - (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), - (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), - ], - }; - - // Activate leaves. 
- activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; - - // Candidate B - let (candidate_b, pvd_b) = make_candidate( - leaf_a.hash, - leaf_a.number, + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, 1.into(), - HeadData(vec![1, 2, 3]), - HeadData(vec![2]), - test_state.validation_code_hash, - ); - let candidate_hash_b = candidate_b.hash(); - introduce_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; - second_candidate(&mut virtual_overseer, candidate_b.clone()).await; - back_candidate(&mut virtual_overseer, &candidate_b, candidate_hash_b).await; + vec![candidate_hash_e, candidate_hash_h].into_iter().collect(), + 2, + vec![(candidate_hash_a, leaf_a.hash), (candidate_hash_b, leaf_a.hash)], + ) + .await; - // Candidate A - let (candidate_a, pvd_a) = make_candidate( - leaf_a.hash, - leaf_a.number, + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, 1.into(), - HeadData(vec![1, 2, 3]), - HeadData(vec![1]), - test_state.validation_code_hash, - ); - let candidate_hash_a = candidate_a.hash(); - introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; - second_candidate(&mut virtual_overseer, candidate_a.clone()).await; - back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await; - - let (candidate_c, candidate_hash_c) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_a, 3); - let (_candidate_d, candidate_hash_d) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_b, 4); - let (_candidate_e, candidate_hash_e) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_b, 5); - let (candidate_f, candidate_hash_f) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_b, 6); - let (_candidate_g, candidate_hash_g) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_f, 7); - let (candidate_h, candidate_hash_h) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_c, 8); - let (candidate_i, candidate_hash_i) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_h, 9); - let (_candidate_j, candidate_hash_j) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_i, 10); + vec![candidate_hash_a, candidate_hash_c, candidate_hash_d].into_iter().collect(), + 2, + vec![(candidate_hash_h, leaf_a.hash), (candidate_hash_i, leaf_a.hash)], + ) + .await; - // Should not get any backable candidates for the other para. - get_backable_candidates(&mut virtual_overseer, &leaf_a, 2.into(), vec![], 1, vec![]) - .await; - get_backable_candidates(&mut virtual_overseer, &leaf_a, 2.into(), vec![], 5, vec![]) - .await; + // Parachain fork. get_backable_candidates( &mut virtual_overseer, &leaf_a, - 2.into(), - vec![candidate_hash_a], + 1.into(), + vec![candidate_hash_a, candidate_hash_b, candidate_hash_c].into_iter().collect(), 1, vec![], ) .await; - // Test various scenarios with various counts. 
- - // empty required_path - { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![], - 1, - vec![(candidate_hash_b, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![], - 2, - vec![(candidate_hash_b, leaf_a.hash), (candidate_hash_d, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![], - 4, - vec![ - (candidate_hash_a, leaf_a.hash), - (candidate_hash_c, leaf_a.hash), - (candidate_hash_h, leaf_a.hash), - (candidate_hash_i, leaf_a.hash), - ], - ) - .await; - } - - // required path of 1 - { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a], - 1, - vec![(candidate_hash_c, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_b], - 1, - vec![(candidate_hash_d, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a], - 2, - vec![(candidate_hash_c, leaf_a.hash), (candidate_hash_h, leaf_a.hash)], - ) - .await; - - // If the requested count exceeds the largest chain, return the longest - // chain we can get. - for count in 2..10 { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_b], - count, - vec![(candidate_hash_f, leaf_a.hash), (candidate_hash_g, leaf_a.hash)], - ) - .await; - } - } - - // required path of 2 - { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_b, candidate_hash_f], - 1, - vec![(candidate_hash_g, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a, candidate_hash_c], - 1, - vec![(candidate_hash_h, leaf_a.hash)], - ) - .await; - // If the requested count exceeds the largest chain, return the longest - // chain we can get. - for count in 4..10 { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a, candidate_hash_c], - count, - vec![ - (candidate_hash_h, leaf_a.hash), - (candidate_hash_i, leaf_a.hash), - (candidate_hash_j, leaf_a.hash), - ], - ) - .await; - } - } - - // No more candidates in any chain. - { - let required_paths = vec![ - vec![candidate_hash_b, candidate_hash_f, candidate_hash_g], - vec![candidate_hash_b, candidate_hash_e], - vec![candidate_hash_b, candidate_hash_d], - vec![ - candidate_hash_a, - candidate_hash_c, - candidate_hash_h, - candidate_hash_i, - candidate_hash_j, - ], - ]; - for path in required_paths { - for count in 1..4 { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - path.clone(), - count, - vec![], - ) - .await; - } - } - } + // Non-existent candidate. + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_a, CandidateHash(Hash::from_low_u64_be(100))] + .into_iter() + .collect(), + 2, + vec![(candidate_hash_b, leaf_a.hash), (candidate_hash_d, leaf_a.hash)], + ) + .await; - // Should not get anything at the wrong path. + // Requested count is zero. 
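+		// A requested count of zero short-circuits: the response is empty regardless of the
+		// ancestors passed in.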
get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_d], - 1, + Ancestors::new(), + 0, vec![], ) .await; @@ -1531,8 +1362,8 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_b, candidate_hash_a], - 3, + vec![candidate_hash_a].into_iter().collect(), + 0, vec![], ) .await; @@ -1540,8 +1371,8 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, candidate_hash_c, candidate_hash_d], - 3, + vec![candidate_hash_a, candidate_hash_b].into_iter().collect(), + 0, vec![], ) .await; @@ -1853,8 +1684,8 @@ fn check_pvd_query() { assert_eq!(view.candidate_storage.len(), 2); } -// Test simultaneously activating and deactivating leaves, and simultaneously deactivating multiple -// leaves. +// Test simultaneously activating and deactivating leaves, and simultaneously deactivating +// multiple leaves. #[test] fn correctly_updates_leaves() { let test_state = TestState::default(); @@ -2048,7 +1879,7 @@ fn persists_pending_availability_candidate() { &mut virtual_overseer, &leaf_b, para_id, - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), 1, vec![(candidate_hash_b, leaf_b_hash)], ) @@ -2113,7 +1944,7 @@ fn backwards_compatible() { &mut virtual_overseer, &leaf_a, para_id, - vec![], + Ancestors::new(), 1, vec![(candidate_hash_a, candidate_relay_parent)], ) @@ -2135,7 +1966,15 @@ fn backwards_compatible() { ) .await; - get_backable_candidates(&mut virtual_overseer, &leaf_b, para_id, vec![], 1, vec![]).await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + para_id, + Ancestors::new(), + 1, + vec![], + ) + .await; virtual_overseer }); @@ -2162,13 +2001,13 @@ fn uses_ancestry_only_within_session() { .await; assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) - ) if parent == hash => { - tx.send(Ok(AsyncBackingParams { max_candidate_depth: 0, allowed_ancestry_len: ancestry_len })).unwrap(); - } - ); + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) + ) if parent == hash => { + tx.send(Ok(AsyncBackingParams { max_candidate_depth: 0, allowed_ancestry_len: ancestry_len + })).unwrap(); } + ); assert_matches!( virtual_overseer.recv().await, diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml index 24cdfd6b57b373712bfdd05e897e2a6b0f1417b3..2a09e2b5b2cc83a0596c6f0ae153e66f8f6a1e66 100644 --- a/polkadot/node/core/provisioner/Cargo.toml +++ b/polkadot/node/core/provisioner/Cargo.toml @@ -20,9 +20,11 @@ polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } futures-timer = "3.0.2" fatality = "0.0.6" +schnellru = "0.2.1" [dev-dependencies] sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } sp-keystore = { path = "../../../../substrate/primitives/keystore" } polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } +rstest = "0.18.2" diff --git a/polkadot/node/core/provisioner/src/error.rs b/polkadot/node/core/provisioner/src/error.rs index 376d69f276fc892e92828bf994b29f847fae31d0..aae3234c3cc49d19bdaf20b87fa71cf3f3276bfe 100644 --- 
a/polkadot/node/core/provisioner/src/error.rs +++ b/polkadot/node/core/provisioner/src/error.rs @@ -44,14 +44,17 @@ pub enum Error { #[error("failed to get block number")] CanceledBlockNumber(#[source] oneshot::Canceled), + #[error("failed to get session index")] + CanceledSessionIndex(#[source] oneshot::Canceled), + #[error("failed to get backed candidates")] CanceledBackedCandidates(#[source] oneshot::Canceled), #[error("failed to get votes on dispute")] CanceledCandidateVotes(#[source] oneshot::Canceled), - #[error("failed to get backable candidate from prospective parachains")] - CanceledBackableCandidate(#[source] oneshot::Canceled), + #[error("failed to get backable candidates from prospective parachains")] + CanceledBackableCandidates(#[source] oneshot::Canceled), #[error(transparent)] ChainApi(#[from] ChainApiError), @@ -71,11 +74,6 @@ pub enum Error { #[error("failed to send return message with Inherents")] InherentDataReturnChannel, - #[error( - "backed candidate does not correspond to selected candidate; check logic in provisioner" - )] - BackedCandidateOrderingProblem, - #[fatal] #[error("Failed to spawn background task")] FailedToSpawnBackgroundTask, diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index a29cf72afb14e7ca949b025066daf1c875101427..c9ed873d3c25ae1a0fbe590a51412839e8105634 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -24,26 +24,29 @@ use futures::{ channel::oneshot, future::BoxFuture, prelude::*, stream::FuturesUnordered, FutureExt, }; use futures_timer::Delay; +use schnellru::{ByLength, LruMap}; use polkadot_node_subsystem::{ jaeger, messages::{ - CandidateBackingMessage, ChainApiMessage, ProspectiveParachainsMessage, ProvisionableData, - ProvisionerInherentData, ProvisionerMessage, RuntimeApiRequest, + Ancestors, CandidateBackingMessage, ChainApiMessage, ProspectiveParachainsMessage, + ProvisionableData, ProvisionerInherentData, ProvisionerMessage, RuntimeApiRequest, }, overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, PerLeafSpan, SpawnedSubsystem, SubsystemError, }; use polkadot_node_subsystem_util::{ has_required_runtime, request_availability_cores, request_persisted_validation_data, - runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, + request_session_index_for_child, + runtime::{prospective_parachains_mode, request_node_features, ProspectiveParachainsMode}, TimeoutExt, }; use polkadot_primitives::{ - BackedCandidate, BlockNumber, CandidateHash, CandidateReceipt, CoreState, Hash, Id as ParaId, - OccupiedCoreAssumption, SignedAvailabilityBitfield, ValidatorIndex, + vstaging::{node_features::FeatureIndex, NodeFeatures}, + BackedCandidate, BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, CoreState, Hash, + Id as ParaId, OccupiedCoreAssumption, SessionIndex, SignedAvailabilityBitfield, ValidatorIndex, }; -use std::collections::{BTreeMap, HashMap}; +use std::collections::{BTreeMap, HashMap, HashSet}; mod disputes; mod error; @@ -77,11 +80,18 @@ impl ProvisionerSubsystem { } } +/// Per-session info we need for the provisioner subsystem. +pub struct PerSession { + prospective_parachains_mode: ProspectiveParachainsMode, + elastic_scaling_mvp: bool, +} + /// A per-relay-parent state for the provisioning subsystem. 
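+/// The prospective parachains mode and the elastic scaling MVP flag are copied from the
+/// cached per-session info when the leaf is activated.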
pub struct PerRelayParent { leaf: ActivatedLeaf, backed_candidates: Vec, prospective_parachains_mode: ProspectiveParachainsMode, + elastic_scaling_mvp: bool, signed_bitfields: Vec, is_inherent_ready: bool, awaiting_inherent: Vec>, @@ -89,13 +99,14 @@ pub struct PerRelayParent { } impl PerRelayParent { - fn new(leaf: ActivatedLeaf, prospective_parachains_mode: ProspectiveParachainsMode) -> Self { + fn new(leaf: ActivatedLeaf, per_session: &PerSession) -> Self { let span = PerLeafSpan::new(leaf.span.clone(), "provisioner"); Self { leaf, backed_candidates: Vec::new(), - prospective_parachains_mode, + prospective_parachains_mode: per_session.prospective_parachains_mode, + elastic_scaling_mvp: per_session.elastic_scaling_mvp, signed_bitfields: Vec::new(), is_inherent_ready: false, awaiting_inherent: Vec::new(), @@ -124,10 +135,17 @@ impl ProvisionerSubsystem { async fn run(mut ctx: Context, metrics: Metrics) -> FatalResult<()> { let mut inherent_delays = InherentDelays::new(); let mut per_relay_parent = HashMap::new(); + let mut per_session = LruMap::new(ByLength::new(2)); loop { - let result = - run_iteration(&mut ctx, &mut per_relay_parent, &mut inherent_delays, &metrics).await; + let result = run_iteration( + &mut ctx, + &mut per_relay_parent, + &mut per_session, + &mut inherent_delays, + &metrics, + ) + .await; match result { Ok(()) => break, @@ -142,6 +160,7 @@ async fn run(mut ctx: Context, metrics: Metrics) -> FatalResult<()> { async fn run_iteration( ctx: &mut Context, per_relay_parent: &mut HashMap, + per_session: &mut LruMap, inherent_delays: &mut InherentDelays, metrics: &Metrics, ) -> Result<(), Error> { @@ -151,7 +170,7 @@ async fn run_iteration( // Map the error to ensure that the subsystem exits when the overseer is gone. match from_overseer.map_err(Error::OverseerExited)? { FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => - handle_active_leaves_update(ctx.sender(), update, per_relay_parent, inherent_delays).await?, + handle_active_leaves_update(ctx.sender(), update, per_relay_parent, per_session, inherent_delays).await?, FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => {}, FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), FromOrchestra::Communication { msg } => { @@ -183,6 +202,7 @@ async fn handle_active_leaves_update( sender: &mut impl overseer::ProvisionerSenderTrait, update: ActiveLeavesUpdate, per_relay_parent: &mut HashMap, + per_session: &mut LruMap, inherent_delays: &mut InherentDelays, ) -> Result<(), Error> { gum::trace!(target: LOG_TARGET, "Handle ActiveLeavesUpdate"); @@ -191,10 +211,31 @@ async fn handle_active_leaves_update( } if let Some(leaf) = update.activated { + let session_index = request_session_index_for_child(leaf.hash, sender) + .await + .await + .map_err(Error::CanceledSessionIndex)??; + if per_session.get(&session_index).is_none() { + let prospective_parachains_mode = + prospective_parachains_mode(sender, leaf.hash).await?; + let elastic_scaling_mvp = request_node_features(leaf.hash, session_index, sender) + .await? 
+ .unwrap_or(NodeFeatures::EMPTY) + .get(FeatureIndex::ElasticScalingMVP as usize) + .map(|b| *b) + .unwrap_or(false); + + per_session.insert( + session_index, + PerSession { prospective_parachains_mode, elastic_scaling_mvp }, + ); + } + + let session_info = per_session.get(&session_index).expect("Just inserted"); + gum::trace!(target: LOG_TARGET, leaf_hash=?leaf.hash, "Adding delay"); - let prospective_parachains_mode = prospective_parachains_mode(sender, leaf.hash).await?; let delay_fut = Delay::new(PRE_PROPOSE_TIMEOUT).map(move |_| leaf.hash).boxed(); - per_relay_parent.insert(leaf.hash, PerRelayParent::new(leaf, prospective_parachains_mode)); + per_relay_parent.insert(leaf.hash, PerRelayParent::new(leaf, session_info)); inherent_delays.push(delay_fut); } @@ -253,6 +294,7 @@ async fn send_inherent_data_bg( let signed_bitfields = per_relay_parent.signed_bitfields.clone(); let backed_candidates = per_relay_parent.backed_candidates.clone(); let mode = per_relay_parent.prospective_parachains_mode; + let elastic_scaling_mvp = per_relay_parent.elastic_scaling_mvp; let span = per_relay_parent.span.child("req-inherent-data"); let mut sender = ctx.sender().clone(); @@ -272,6 +314,7 @@ async fn send_inherent_data_bg( &signed_bitfields, &backed_candidates, mode, + elastic_scaling_mvp, return_senders, &mut sender, &metrics, @@ -383,6 +426,7 @@ async fn send_inherent_data( bitfields: &[SignedAvailabilityBitfield], candidates: &[CandidateReceipt], prospective_parachains_mode: ProspectiveParachainsMode, + elastic_scaling_mvp: bool, return_senders: Vec>, from_job: &mut impl overseer::ProvisionerSenderTrait, metrics: &Metrics, @@ -434,6 +478,7 @@ async fn send_inherent_data( &bitfields, candidates, prospective_parachains_mode, + elastic_scaling_mvp, leaf.hash, from_job, ) @@ -558,6 +603,8 @@ async fn select_candidate_hashes_from_tracked( let mut selected_candidates = Vec::with_capacity(candidates.len().min(availability_cores.len())); + let mut selected_parachains = + HashSet::with_capacity(candidates.len().min(availability_cores.len())); gum::debug!( target: LOG_TARGET, @@ -591,6 +638,12 @@ async fn select_candidate_hashes_from_tracked( CoreState::Free => continue, }; + if selected_parachains.contains(&scheduled_core.para_id) { + // We already picked a candidate for this parachain. Elastic scaling only works with + // prospective parachains mode. + continue + } + let validation_data = match request_persisted_validation_data( relay_parent, scheduled_core.para_id, @@ -624,6 +677,7 @@ async fn select_candidate_hashes_from_tracked( "Selected candidate receipt", ); + selected_parachains.insert(candidate.descriptor.para_id); selected_candidates.push((candidate_hash, candidate.descriptor.relay_parent)); } } @@ -637,70 +691,93 @@ async fn select_candidate_hashes_from_tracked( /// Should be called when prospective parachains are enabled. async fn request_backable_candidates( availability_cores: &[CoreState], + elastic_scaling_mvp: bool, bitfields: &[SignedAvailabilityBitfield], relay_parent: Hash, sender: &mut impl overseer::ProvisionerSenderTrait, ) -> Result, Error> { let block_number = get_block_number_under_construction(relay_parent, sender).await?; - let mut selected_candidates = Vec::with_capacity(availability_cores.len()); + // Record how many cores are scheduled for each paraid. Use a BTreeMap because + // we'll need to iterate through them. + let mut scheduled_cores: BTreeMap = BTreeMap::new(); + // The on-chain ancestors of a para present in availability-cores. 
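+	// Every candidate that just became available in this block, as well as the ones still
+	// pending availability, is recorded here so that prospective-parachains continues each
+	// para's chain after them instead of returning the same candidates again.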
+ let mut ancestors: HashMap = + HashMap::with_capacity(availability_cores.len()); for (core_idx, core) in availability_cores.iter().enumerate() { - let (para_id, required_path) = match core { + let core_idx = CoreIndex(core_idx as u32); + match core { CoreState::Scheduled(scheduled_core) => { - // The core is free, pick the first eligible candidate from - // the fragment tree. - (scheduled_core.para_id, Vec::new()) + *scheduled_cores.entry(scheduled_core.para_id).or_insert(0) += 1; }, CoreState::Occupied(occupied_core) => { - if bitfields_indicate_availability(core_idx, bitfields, &occupied_core.availability) - { + let is_available = bitfields_indicate_availability( + core_idx.0 as usize, + bitfields, + &occupied_core.availability, + ); + + if is_available { + ancestors + .entry(occupied_core.para_id()) + .or_default() + .insert(occupied_core.candidate_hash); + if let Some(ref scheduled_core) = occupied_core.next_up_on_available { - // The candidate occupying the core is available, choose its - // child in the fragment tree. - // - // TODO: doesn't work for on-demand parachains. We lean hard on the - // assumption that cores are fixed to specific parachains within a session. - // https://github.com/paritytech/polkadot/issues/5492 - (scheduled_core.para_id, vec![occupied_core.candidate_hash]) - } else { - continue - } - } else { - if occupied_core.time_out_at != block_number { - continue + // Request a new backable candidate for the newly scheduled para id. + *scheduled_cores.entry(scheduled_core.para_id).or_insert(0) += 1; } + } else if occupied_core.time_out_at <= block_number { + // Timed out before being available. + if let Some(ref scheduled_core) = occupied_core.next_up_on_time_out { // Candidate's availability timed out, practically same as scheduled. - (scheduled_core.para_id, Vec::new()) - } else { - continue + *scheduled_cores.entry(scheduled_core.para_id).or_insert(0) += 1; } + } else { + // Not timed out and not available. + ancestors + .entry(occupied_core.para_id()) + .or_default() + .insert(occupied_core.candidate_hash); } }, CoreState::Free => continue, }; + } - // We should be calling this once per para rather than per core. - // TODO: Will be fixed in https://github.com/paritytech/polkadot-sdk/pull/3233. - // For now, at least make sure we don't supply the same candidate multiple times in case a - // para has multiple cores scheduled. - let response = get_backable_candidate(relay_parent, para_id, required_path, sender).await?; - match response { - Some((hash, relay_parent)) => { - if !selected_candidates.iter().any(|bc| &(hash, relay_parent) == bc) { - selected_candidates.push((hash, relay_parent)) - } - }, - None => { - gum::debug!( - target: LOG_TARGET, - leaf_hash = ?relay_parent, - core = core_idx, - "No backable candidate returned by prospective parachains", - ); - }, + let mut selected_candidates: Vec<(CandidateHash, Hash)> = + Vec::with_capacity(availability_cores.len()); + + for (para_id, core_count) in scheduled_cores { + let para_ancestors = ancestors.remove(¶_id).unwrap_or_default(); + + // If elastic scaling MVP is disabled, only allow one candidate per parachain. 
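+		// Without the node feature enabled, a para that has more than one core scheduled is
+		// skipped here entirely rather than being reduced to a single request.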
+ if !elastic_scaling_mvp && core_count > 1 { + continue } + + let response = get_backable_candidates( + relay_parent, + para_id, + para_ancestors, + core_count as u32, + sender, + ) + .await?; + + if response.is_empty() { + gum::debug!( + target: LOG_TARGET, + leaf_hash = ?relay_parent, + ?para_id, + "No backable candidate returned by prospective parachains", + ); + continue + } + + selected_candidates.extend(response.into_iter().take(core_count)); } Ok(selected_candidates) @@ -713,6 +790,7 @@ async fn select_candidates( bitfields: &[SignedAvailabilityBitfield], candidates: &[CandidateReceipt], prospective_parachains_mode: ProspectiveParachainsMode, + elastic_scaling_mvp: bool, relay_parent: Hash, sender: &mut impl overseer::ProvisionerSenderTrait, ) -> Result, Error> { @@ -722,7 +800,14 @@ async fn select_candidates( let selected_candidates = match prospective_parachains_mode { ProspectiveParachainsMode::Enabled { .. } => - request_backable_candidates(availability_cores, bitfields, relay_parent, sender).await?, + request_backable_candidates( + availability_cores, + elastic_scaling_mvp, + bitfields, + relay_parent, + sender, + ) + .await?, ProspectiveParachainsMode::Disabled => select_candidate_hashes_from_tracked( availability_cores, @@ -745,24 +830,6 @@ async fn select_candidates( gum::trace!(target: LOG_TARGET, leaf_hash=?relay_parent, "Got {} backed candidates", candidates.len()); - // `selected_candidates` is generated in ascending order by core index, and - // `GetBackedCandidates` _should_ preserve that property, but let's just make sure. - // - // We can't easily map from `BackedCandidate` to `core_idx`, but we know that every selected - // candidate maps to either 0 or 1 backed candidate, and the hashes correspond. Therefore, by - // checking them in order, we can ensure that the backed candidates are also in order. - let mut backed_idx = 0; - for selected in selected_candidates { - if selected.0 == - candidates.get(backed_idx).ok_or(Error::BackedCandidateOrderingProblem)?.hash() - { - backed_idx += 1; - } - } - if candidates.len() != backed_idx { - Err(Error::BackedCandidateOrderingProblem)?; - } - // keep only one candidate with validation code. let mut with_validation_code = false; candidates.retain(|c| { @@ -804,28 +871,27 @@ async fn get_block_number_under_construction( } } -/// Requests backable candidate from Prospective Parachains based on -/// the given path in the fragment tree. -async fn get_backable_candidate( +/// Requests backable candidates from Prospective Parachains based on +/// the given ancestors in the fragment tree. The ancestors may not be ordered. +async fn get_backable_candidates( relay_parent: Hash, para_id: ParaId, - required_path: Vec, + ancestors: Ancestors, + count: u32, sender: &mut impl overseer::ProvisionerSenderTrait, -) -> Result, Error> { +) -> Result, Error> { let (tx, rx) = oneshot::channel(); sender .send_message(ProspectiveParachainsMessage::GetBackableCandidates( relay_parent, para_id, - 1, // core count hardcoded to 1, until elastic scaling is implemented and enabled. 
- required_path, + count, + ancestors, tx, )) .await; - rx.await - .map_err(Error::CanceledBackableCandidate) - .map(|res| res.get(0).copied()) + rx.await.map_err(Error::CanceledBackableCandidates) } /// The availability bitfield for a given core is the transpose diff --git a/polkadot/node/core/provisioner/src/tests.rs b/polkadot/node/core/provisioner/src/tests.rs index 87c0e7a65d35310341fe7df3e6b2cb37239b7313..bdb4f85f4009bdfff879cefcb8928899c5b85c81 100644 --- a/polkadot/node/core/provisioner/src/tests.rs +++ b/polkadot/node/core/provisioner/src/tests.rs @@ -22,6 +22,9 @@ use polkadot_primitives::{OccupiedCore, ScheduledCore}; const MOCK_GROUP_SIZE: usize = 5; pub fn occupied_core(para_id: u32) -> CoreState { + let mut candidate_descriptor = dummy_candidate_descriptor(dummy_hash()); + candidate_descriptor.para_id = para_id.into(); + CoreState::Occupied(OccupiedCore { group_responsible: para_id.into(), next_up_on_available: None, @@ -29,7 +32,7 @@ pub fn occupied_core(para_id: u32) -> CoreState { time_out_at: 200_u32, next_up_on_time_out: None, availability: bitvec![u8, bitvec::order::Lsb0; 0; 32], - candidate_descriptor: dummy_candidate_descriptor(dummy_hash()), + candidate_descriptor, candidate_hash: Default::default(), }) } @@ -254,10 +257,56 @@ mod select_candidates { use polkadot_primitives::{ BlockNumber, CandidateCommitments, CommittedCandidateReceipt, PersistedValidationData, }; + use rstest::rstest; const BLOCK_UNDER_PRODUCTION: BlockNumber = 128; - // For test purposes, we always return this set of availability cores: + fn dummy_candidate_template() -> CandidateReceipt { + let empty_hash = PersistedValidationData::::default().hash(); + + let mut descriptor_template = dummy_candidate_descriptor(dummy_hash()); + descriptor_template.persisted_validation_data_hash = empty_hash; + CandidateReceipt { + descriptor: descriptor_template, + commitments_hash: CandidateCommitments::default().hash(), + } + } + + fn make_candidates( + core_count: usize, + expected_backed_indices: Vec, + ) -> (Vec, Vec) { + let candidate_template = dummy_candidate_template(); + let candidates: Vec<_> = std::iter::repeat(candidate_template) + .take(core_count) + .enumerate() + .map(|(idx, mut candidate)| { + candidate.descriptor.para_id = idx.into(); + candidate + }) + .collect(); + + let expected_backed = expected_backed_indices + .iter() + .map(|&idx| candidates[idx].clone()) + .map(|c| { + BackedCandidate::new( + CommittedCandidateReceipt { + descriptor: c.descriptor.clone(), + commitments: Default::default(), + }, + Vec::new(), + default_bitvec(MOCK_GROUP_SIZE), + None, + ) + }) + .collect(); + let candidate_hashes = candidates.into_iter().map(|c| c.hash()).collect(); + + (candidate_hashes, expected_backed) + } + + // For testing only one core assigned to a parachain, we return this set of availability cores: // // [ // 0: Free, @@ -273,7 +322,7 @@ mod select_candidates { // 10: Occupied(both next_up set, not available, timeout), // 11: Occupied(next_up_on_available and available, but different successor para_id) // ] - fn mock_availability_cores() -> Vec { + fn mock_availability_cores_one_per_para() -> Vec { use std::ops::Not; use CoreState::{Free, Scheduled}; @@ -292,6 +341,7 @@ mod select_candidates { build_occupied_core(4, |core| { core.next_up_on_available = Some(scheduled_core(4)); core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(41)); }), // 5: Occupied(next_up_on_time_out set but not timeout), build_occupied_core(5, |core| { @@ -307,12 
+357,14 @@ mod select_candidates { build_occupied_core(7, |core| { core.next_up_on_time_out = Some(scheduled_core(7)); core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(71)); }), // 8: Occupied(both next_up set, available), build_occupied_core(8, |core| { core.next_up_on_available = Some(scheduled_core(8)); core.next_up_on_time_out = Some(scheduled_core(8)); core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(81)); }), // 9: Occupied(both next_up set, not available, no timeout), build_occupied_core(9, |core| { @@ -324,6 +376,7 @@ mod select_candidates { core.next_up_on_available = Some(scheduled_core(10)); core.next_up_on_time_out = Some(scheduled_core(10)); core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(101)); }), // 11: Occupied(next_up_on_available and available, but different successor para_id) build_occupied_core(11, |core| { @@ -333,20 +386,189 @@ mod select_candidates { ] } + // For test purposes with multiple possible cores assigned to a para, we always return this set + // of availability cores: + fn mock_availability_cores_multiple_per_para() -> Vec { + use std::ops::Not; + use CoreState::{Free, Scheduled}; + + vec![ + // 0: Free, + Free, + // 1: Scheduled(default), + Scheduled(scheduled_core(1)), + // 2: Occupied(no next_up set), + occupied_core(2), + // 3: Occupied(next_up_on_available set but not available), + build_occupied_core(3, |core| { + core.next_up_on_available = Some(scheduled_core(3)); + }), + // 4: Occupied(next_up_on_available set and available), + build_occupied_core(4, |core| { + core.next_up_on_available = Some(scheduled_core(4)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(41)); + }), + // 5: Occupied(next_up_on_time_out set but not timeout), + build_occupied_core(5, |core| { + core.next_up_on_time_out = Some(scheduled_core(5)); + }), + // 6: Occupied(next_up_on_time_out set and timeout but available), + build_occupied_core(6, |core| { + core.next_up_on_time_out = Some(scheduled_core(6)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.availability = core.availability.clone().not(); + }), + // 7: Occupied(next_up_on_time_out set and timeout and not available), + build_occupied_core(7, |core| { + core.next_up_on_time_out = Some(scheduled_core(7)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(71)); + }), + // 8: Occupied(both next_up set, available), + build_occupied_core(8, |core| { + core.next_up_on_available = Some(scheduled_core(8)); + core.next_up_on_time_out = Some(scheduled_core(8)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(81)); + }), + // 9: Occupied(both next_up set, not available, no timeout), + build_occupied_core(9, |core| { + core.next_up_on_available = Some(scheduled_core(9)); + core.next_up_on_time_out = Some(scheduled_core(9)); + }), + // 10: Occupied(both next_up set, not available, timeout), + build_occupied_core(10, |core| { + core.next_up_on_available = Some(scheduled_core(10)); + core.next_up_on_time_out = Some(scheduled_core(10)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(101)); + }), + // 11: Occupied(next_up_on_available and available, but different successor para_id) + build_occupied_core(11, |core| { + 
core.next_up_on_available = Some(scheduled_core(12)); + core.availability = core.availability.clone().not(); + }), + // 12-14: Occupied(next_up_on_available and available, same para_id). + build_occupied_core(12, |core| { + core.next_up_on_available = Some(scheduled_core(12)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(121)); + }), + build_occupied_core(12, |core| { + core.next_up_on_available = Some(scheduled_core(12)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(122)); + }), + build_occupied_core(12, |core| { + core.next_up_on_available = Some(scheduled_core(12)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(123)); + }), + // 15: Scheduled on same para_id as 12-14. + Scheduled(scheduled_core(12)), + // 16: Occupied(13, no next_up set, not available) + build_occupied_core(13, |core| { + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(131)); + }), + // 17: Occupied(13, no next_up set, available) + build_occupied_core(13, |core| { + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(132)); + }), + // 18: Occupied(13, next_up_on_available set to 13 but not available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(13)); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(133)); + }), + // 19: Occupied(13, next_up_on_available set to 13 and available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(13)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(134)); + }), + // 20: Occupied(13, next_up_on_time_out set to 13 but not timeout) + build_occupied_core(13, |core| { + core.next_up_on_time_out = Some(scheduled_core(13)); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(135)); + }), + // 21: Occupied(13, next_up_on_available set to 14 and available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(14)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(136)); + }), + // 22: Occupied(13, next_up_on_available set to 14 but not available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(14)); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(137)); + }), + // 23: Occupied(13, both next_up set to 14, available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(14)); + core.next_up_on_time_out = Some(scheduled_core(14)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(138)); + }), + // 24: Occupied(13, both next_up set to 14, not available, timeout) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(14)); + core.next_up_on_time_out = Some(scheduled_core(14)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(1399)); + }), + // 25: Occupied(13, next_up_on_available and available, but successor para_id 15) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(15)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(139)); + }), + // 26: 
Occupied(15, next_up_on_available and available, but successor para_id 13) + build_occupied_core(15, |core| { + core.next_up_on_available = Some(scheduled_core(13)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(151)); + }), + // 27: Occupied(15, both next_up, both available and timed out) + build_occupied_core(15, |core| { + core.next_up_on_available = Some(scheduled_core(15)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(152)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + }), + // 28: Occupied(13, both next_up set to 13, not available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(13)); + core.next_up_on_time_out = Some(scheduled_core(13)); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(1398)); + }), + // 29: Occupied(13, both next_up set to 13, not available, timeout) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(13)); + core.next_up_on_time_out = Some(scheduled_core(13)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(1397)); + }), + ] + } + async fn mock_overseer( mut receiver: mpsc::UnboundedReceiver, - expected: Vec, + mock_availability_cores: Vec, + mut expected: Vec, + mut expected_ancestors: HashMap, Ancestors>, prospective_parachains_mode: ProspectiveParachainsMode, ) { use ChainApiMessage::BlockNumber; use RuntimeApiMessage::Request; + let mut backed_iter = expected.clone().into_iter(); + + expected.sort_by_key(|c| c.candidate().descriptor.para_id); let mut candidates_iter = expected .iter() .map(|candidate| (candidate.hash(), candidate.descriptor().relay_parent)); - let mut backed_iter = expected.clone().into_iter(); - while let Some(from_job) = receiver.next().await { match from_job { AllMessages::ChainApi(BlockNumber(_relay_parent, tx)) => @@ -356,7 +578,7 @@ mod select_candidates { PersistedValidationDataReq(_para_id, _assumption, tx), )) => tx.send(Ok(Some(Default::default()))).unwrap(), AllMessages::RuntimeApi(Request(_parent_hash, AvailabilityCores(tx))) => - tx.send(Ok(mock_availability_cores())).unwrap(), + tx.send(Ok(mock_availability_cores.clone())).unwrap(), AllMessages::CandidateBacking(CandidateBackingMessage::GetBackedCandidates( hashes, sender, @@ -373,35 +595,71 @@ mod select_candidates { let _ = sender.send(response); }, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::GetBackableCandidates(_, _, count, _, tx), - ) => { - assert_eq!(count, 1); - - match prospective_parachains_mode { - ProspectiveParachainsMode::Enabled { .. } => { - let _ = - tx.send(candidates_iter.next().map_or_else(Vec::new, |c| vec![c])); - }, - ProspectiveParachainsMode::Disabled => - panic!("unexpected prospective parachains request"), - } + ProspectiveParachainsMessage::GetBackableCandidates( + _, + _para_id, + count, + actual_ancestors, + tx, + ), + ) => match prospective_parachains_mode { + ProspectiveParachainsMode::Enabled { .. 
} => { + assert!(count > 0); + let candidates = + (&mut candidates_iter).take(count as usize).collect::>(); + assert_eq!(candidates.len(), count as usize); + + if !expected_ancestors.is_empty() { + if let Some(expected_required_ancestors) = expected_ancestors.remove( + &(candidates + .clone() + .into_iter() + .take(actual_ancestors.len()) + .map(|(c_hash, _)| c_hash) + .collect::>()), + ) { + assert_eq!(expected_required_ancestors, actual_ancestors); + } else { + assert_eq!(actual_ancestors.len(), 0); + } + } + + let _ = tx.send(candidates); + }, + ProspectiveParachainsMode::Disabled => + panic!("unexpected prospective parachains request"), }, _ => panic!("Unexpected message: {:?}", from_job), } } + + if let ProspectiveParachainsMode::Enabled { .. } = prospective_parachains_mode { + assert_eq!(candidates_iter.next(), None); + } + assert_eq!(expected_ancestors.len(), 0); } - #[test] - fn can_succeed() { + #[rstest] + #[case(ProspectiveParachainsMode::Disabled)] + #[case(ProspectiveParachainsMode::Enabled {max_candidate_depth: 0, allowed_ancestry_len: 0})] + fn can_succeed(#[case] prospective_parachains_mode: ProspectiveParachainsMode) { test_harness( - |r| mock_overseer(r, Vec::new(), ProspectiveParachainsMode::Disabled), + |r| { + mock_overseer( + r, + Vec::new(), + Vec::new(), + HashMap::new(), + prospective_parachains_mode, + ) + }, |mut tx: TestSubsystemSender| async move { - let prospective_parachains_mode = ProspectiveParachainsMode::Disabled; select_candidates( &[], &[], &[], prospective_parachains_mode, + false, Default::default(), &mut tx, ) @@ -411,22 +669,22 @@ mod select_candidates { ) } - // this tests that only the appropriate candidates get selected. - // To accomplish this, we supply a candidate list containing one candidate per possible core; - // the candidate selection algorithm must filter them to the appropriate set - #[test] - fn selects_correct_candidates() { - let mock_cores = mock_availability_cores(); - - let empty_hash = PersistedValidationData::::default().hash(); - - let mut descriptor_template = dummy_candidate_descriptor(dummy_hash()); - descriptor_template.persisted_validation_data_hash = empty_hash; - let candidate_template = CandidateReceipt { - descriptor: descriptor_template, - commitments_hash: CandidateCommitments::default().hash(), - }; - + // Test candidate selection when prospective parachains mode is disabled. + // This tests that only the appropriate candidates get selected when prospective parachains mode + // is disabled. To accomplish this, we supply a candidate list containing one candidate per + // possible core; the candidate selection algorithm must filter them to the appropriate set + #[rstest] + // why those particular indices? see the comments on mock_availability_cores_*() functions. + #[case(mock_availability_cores_one_per_para(), vec![1, 4, 7, 8, 10], true)] + #[case(mock_availability_cores_one_per_para(), vec![1, 4, 7, 8, 10], false)] + #[case(mock_availability_cores_multiple_per_para(), vec![1, 4, 7, 8, 10, 12, 13, 14, 15], true)] + #[case(mock_availability_cores_multiple_per_para(), vec![1, 4, 7, 8, 10, 12, 13, 14, 15], false)] + fn test_in_subsystem_selection( + #[case] mock_cores: Vec, + #[case] expected_candidates: Vec, + #[case] elastic_scaling_mvp: bool, + ) { + let candidate_template = dummy_candidate_template(); let candidates: Vec<_> = std::iter::repeat(candidate_template) .take(mock_cores.len()) .enumerate() @@ -453,9 +711,8 @@ mod select_candidates { }) .collect(); - // why those particular indices? 
see the comments on mock_availability_cores() let expected_candidates: Vec<_> = - [1, 4, 7, 8, 10].iter().map(|&idx| candidates[idx].clone()).collect(); + expected_candidates.into_iter().map(|idx| candidates[idx].clone()).collect(); let prospective_parachains_mode = ProspectiveParachainsMode::Disabled; let expected_backed = expected_candidates @@ -473,14 +730,24 @@ mod select_candidates { }) .collect(); + let mock_cores_clone = mock_cores.clone(); test_harness( - |r| mock_overseer(r, expected_backed, prospective_parachains_mode), + |r| { + mock_overseer( + r, + mock_cores_clone, + expected_backed, + HashMap::new(), + prospective_parachains_mode, + ) + }, |mut tx: TestSubsystemSender| async move { - let result = select_candidates( + let result: Vec = select_candidates( &mock_cores, &[], &candidates, prospective_parachains_mode, + elastic_scaling_mvp, Default::default(), &mut tx, ) @@ -498,20 +765,24 @@ mod select_candidates { ) } - #[test] - fn selects_max_one_code_upgrade() { - let mock_cores = mock_availability_cores(); + #[rstest] + #[case(ProspectiveParachainsMode::Disabled)] + #[case(ProspectiveParachainsMode::Enabled {max_candidate_depth: 0, allowed_ancestry_len: 0})] + fn selects_max_one_code_upgrade( + #[case] prospective_parachains_mode: ProspectiveParachainsMode, + ) { + let mock_cores = mock_availability_cores_one_per_para(); let empty_hash = PersistedValidationData::::default().hash(); // why those particular indices? see the comments on mock_availability_cores() - // the first candidate with code is included out of [1, 4, 7, 8, 10]. - let cores = [1, 4, 7, 8, 10]; + // the first candidate with code is included out of [1, 4, 7, 8, 10, 12]. + let cores = [1, 4, 7, 8, 10, 12]; let cores_with_code = [1, 4, 8]; - let expected_cores = [1, 7, 10]; + let expected_cores = [1, 7, 10, 12]; - let committed_receipts: Vec<_> = (0..mock_cores.len()) + let committed_receipts: Vec<_> = (0..=mock_cores.len()) .map(|i| { let mut descriptor = dummy_candidate_descriptor(dummy_hash()); descriptor.para_id = i.into(); @@ -552,23 +823,32 @@ mod select_candidates { let expected_backed_filtered: Vec<_> = expected_cores.iter().map(|&idx| candidates[idx].clone()).collect(); - let prospective_parachains_mode = ProspectiveParachainsMode::Disabled; + let mock_cores_clone = mock_cores.clone(); test_harness( - |r| mock_overseer(r, expected_backed, prospective_parachains_mode), + |r| { + mock_overseer( + r, + mock_cores_clone, + expected_backed, + HashMap::new(), + prospective_parachains_mode, + ) + }, |mut tx: TestSubsystemSender| async move { let result = select_candidates( &mock_cores, &[], &candidates, prospective_parachains_mode, + false, Default::default(), &mut tx, ) .await .unwrap(); - assert_eq!(result.len(), 3); + assert_eq!(result.len(), 4); result.into_iter().for_each(|c| { assert!( @@ -581,66 +861,214 @@ mod select_candidates { ) } - #[test] - fn request_from_prospective_parachains() { - let mock_cores = mock_availability_cores(); - let empty_hash = PersistedValidationData::::default().hash(); + #[rstest] + #[case(true)] + #[case(false)] + fn request_from_prospective_parachains_one_core_per_para(#[case] elastic_scaling_mvp: bool) { + let mock_cores = mock_availability_cores_one_per_para(); - let mut descriptor_template = dummy_candidate_descriptor(dummy_hash()); - descriptor_template.persisted_validation_data_hash = empty_hash; - let candidate_template = CandidateReceipt { - descriptor: descriptor_template, - commitments_hash: CandidateCommitments::default().hash(), - }; + // why those particular 
indices? see the comments on mock_availability_cores() + let expected_candidates: Vec<_> = vec![1, 4, 7, 8, 10, 12]; + let (candidates, expected_candidates) = + make_candidates(mock_cores.len() + 1, expected_candidates); - let candidates: Vec<_> = std::iter::repeat(candidate_template) - .take(mock_cores.len()) - .enumerate() - .map(|(idx, mut candidate)| { - candidate.descriptor.para_id = idx.into(); - candidate - }) - .collect(); + // Expect prospective parachains subsystem requests. + let prospective_parachains_mode = + ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 }; + + let mut required_ancestors: HashMap, Ancestors> = HashMap::new(); + required_ancestors.insert( + vec![candidates[4]], + vec![CandidateHash(Hash::from_low_u64_be(41))].into_iter().collect(), + ); + required_ancestors.insert( + vec![candidates[8]], + vec![CandidateHash(Hash::from_low_u64_be(81))].into_iter().collect(), + ); + + let mock_cores_clone = mock_cores.clone(); + let expected_candidates_clone = expected_candidates.clone(); + test_harness( + |r| { + mock_overseer( + r, + mock_cores_clone, + expected_candidates_clone, + required_ancestors, + prospective_parachains_mode, + ) + }, + |mut tx: TestSubsystemSender| async move { + let result = select_candidates( + &mock_cores, + &[], + &[], + prospective_parachains_mode, + elastic_scaling_mvp, + Default::default(), + &mut tx, + ) + .await + .unwrap(); + + assert_eq!(result.len(), expected_candidates.len()); + result.into_iter().for_each(|c| { + assert!( + expected_candidates + .iter() + .any(|c2| c.candidate().corresponds_to(&c2.receipt())), + "Failed to find candidate: {:?}", + c, + ) + }); + }, + ) + } + + #[test] + fn request_from_prospective_parachains_multiple_cores_per_para_elastic_scaling_mvp() { + let mock_cores = mock_availability_cores_multiple_per_para(); // why those particular indices? see the comments on mock_availability_cores() let expected_candidates: Vec<_> = - [1, 4, 7, 8, 10].iter().map(|&idx| candidates[idx].clone()).collect(); + vec![1, 4, 7, 8, 10, 12, 12, 12, 12, 12, 13, 13, 13, 14, 14, 14, 15, 15]; // Expect prospective parachains subsystem requests. 
let prospective_parachains_mode = ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 }; - let expected_backed = expected_candidates - .iter() - .map(|c| { - BackedCandidate::new( - CommittedCandidateReceipt { - descriptor: c.descriptor.clone(), - commitments: Default::default(), - }, - Vec::new(), - default_bitvec(MOCK_GROUP_SIZE), - None, + let (candidates, expected_candidates) = + make_candidates(mock_cores.len(), expected_candidates); + + let mut required_ancestors: HashMap, Ancestors> = HashMap::new(); + required_ancestors.insert( + vec![candidates[4]], + vec![CandidateHash(Hash::from_low_u64_be(41))].into_iter().collect(), + ); + required_ancestors.insert( + vec![candidates[8]], + vec![CandidateHash(Hash::from_low_u64_be(81))].into_iter().collect(), + ); + required_ancestors.insert( + [12, 12, 12].iter().map(|&idx| candidates[idx]).collect::>(), + vec![ + CandidateHash(Hash::from_low_u64_be(121)), + CandidateHash(Hash::from_low_u64_be(122)), + CandidateHash(Hash::from_low_u64_be(123)), + ] + .into_iter() + .collect(), + ); + required_ancestors.insert( + [13, 13, 13].iter().map(|&idx| candidates[idx]).collect::>(), + (131..=139) + .map(|num| CandidateHash(Hash::from_low_u64_be(num))) + .chain(std::iter::once(CandidateHash(Hash::from_low_u64_be(1398)))) + .collect(), + ); + + required_ancestors.insert( + [15, 15].iter().map(|&idx| candidates[idx]).collect::>(), + vec![ + CandidateHash(Hash::from_low_u64_be(151)), + CandidateHash(Hash::from_low_u64_be(152)), + ] + .into_iter() + .collect(), + ); + + let mock_cores_clone = mock_cores.clone(); + let expected_candidates_clone = expected_candidates.clone(); + test_harness( + |r| { + mock_overseer( + r, + mock_cores_clone, + expected_candidates, + required_ancestors, + prospective_parachains_mode, ) - }) - .collect(); + }, + |mut tx: TestSubsystemSender| async move { + let result = select_candidates( + &mock_cores, + &[], + &[], + prospective_parachains_mode, + true, + Default::default(), + &mut tx, + ) + .await + .unwrap(); + assert_eq!(result.len(), expected_candidates_clone.len()); + result.into_iter().for_each(|c| { + assert!( + expected_candidates_clone + .iter() + .any(|c2| c.candidate().corresponds_to(&c2.receipt())), + "Failed to find candidate: {:?}", + c, + ) + }); + }, + ) + } + + #[test] + fn request_from_prospective_parachains_multiple_cores_per_para_elastic_scaling_mvp_disabled() { + let mock_cores = mock_availability_cores_multiple_per_para(); + + // why those particular indices? see the comments on mock_availability_cores() + let expected_candidates: Vec<_> = vec![1, 4, 7, 8, 10]; + // Expect prospective parachains subsystem requests. 
+ let prospective_parachains_mode = + ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 }; + + let (candidates, expected_candidates) = + make_candidates(mock_cores.len(), expected_candidates); + + let mut required_ancestors: HashMap, Ancestors> = HashMap::new(); + required_ancestors.insert( + vec![candidates[4]], + vec![CandidateHash(Hash::from_low_u64_be(41))].into_iter().collect(), + ); + required_ancestors.insert( + vec![candidates[8]], + vec![CandidateHash(Hash::from_low_u64_be(81))].into_iter().collect(), + ); + + let mock_cores_clone = mock_cores.clone(); + let expected_candidates_clone = expected_candidates.clone(); test_harness( - |r| mock_overseer(r, expected_backed, prospective_parachains_mode), + |r| { + mock_overseer( + r, + mock_cores_clone, + expected_candidates, + required_ancestors, + prospective_parachains_mode, + ) + }, |mut tx: TestSubsystemSender| async move { let result = select_candidates( &mock_cores, &[], &[], prospective_parachains_mode, + false, Default::default(), &mut tx, ) .await .unwrap(); + assert_eq!(result.len(), expected_candidates_clone.len()); result.into_iter().for_each(|c| { assert!( - expected_candidates.iter().any(|c2| c.candidate().corresponds_to(c2)), + expected_candidates_clone + .iter() + .any(|c2| c.candidate().corresponds_to(&c2.receipt())), "Failed to find candidate: {:?}", c, ) @@ -651,18 +1079,11 @@ mod select_candidates { #[test] fn request_receipts_based_on_relay_parent() { - let mock_cores = mock_availability_cores(); - let empty_hash = PersistedValidationData::::default().hash(); - - let mut descriptor_template = dummy_candidate_descriptor(dummy_hash()); - descriptor_template.persisted_validation_data_hash = empty_hash; - let candidate_template = CandidateReceipt { - descriptor: descriptor_template, - commitments_hash: CandidateCommitments::default().hash(), - }; + let mock_cores = mock_availability_cores_one_per_para(); + let candidate_template = dummy_candidate_template(); let candidates: Vec<_> = std::iter::repeat(candidate_template) - .take(mock_cores.len()) + .take(mock_cores.len() + 1) .enumerate() .map(|(idx, mut candidate)| { candidate.descriptor.para_id = idx.into(); @@ -673,7 +1094,7 @@ mod select_candidates { // why those particular indices? see the comments on mock_availability_cores() let expected_candidates: Vec<_> = - [1, 4, 7, 8, 10].iter().map(|&idx| candidates[idx].clone()).collect(); + [1, 4, 7, 8, 10, 12].iter().map(|&idx| candidates[idx].clone()).collect(); // Expect prospective parachains subsystem requests. 
let prospective_parachains_mode = ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 }; @@ -693,14 +1114,24 @@ mod select_candidates { }) .collect(); + let mock_cores_clone = mock_cores.clone(); test_harness( - |r| mock_overseer(r, expected_backed, prospective_parachains_mode), + |r| { + mock_overseer( + r, + mock_cores_clone, + expected_backed, + HashMap::new(), + prospective_parachains_mode, + ) + }, |mut tx: TestSubsystemSender| async move { let result = select_candidates( &mock_cores, &[], &[], prospective_parachains_mode, + false, Default::default(), &mut tx, ) diff --git a/polkadot/node/core/pvf/common/src/error.rs b/polkadot/node/core/pvf/common/src/error.rs index f8faefc24e65aadb2c50d2375e30fc7d579d6d37..cf274044456f3ea2db6770fbd61b3319f6420996 100644 --- a/polkadot/node/core/pvf/common/src/error.rs +++ b/polkadot/node/core/pvf/common/src/error.rs @@ -16,6 +16,7 @@ use crate::prepare::{PrepareSuccess, PrepareWorkerSuccess}; use parity_scale_codec::{Decode, Encode}; +pub use sc_executor_common::error::Error as ExecuteError; /// Result of PVF preparation from a worker, with checksum of the compiled PVF and stats of the /// preparation if successful. diff --git a/polkadot/node/core/pvf/common/src/execute.rs b/polkadot/node/core/pvf/common/src/execute.rs index 6b3becf524d71fa2d4f4bf574e0a86d09a7a60d9..18c97b03cbcd6fccc03cb27cb9f9e3aba9c7cf2a 100644 --- a/polkadot/node/core/pvf/common/src/execute.rs +++ b/polkadot/node/core/pvf/common/src/execute.rs @@ -40,6 +40,9 @@ pub enum WorkerResponse { }, /// The candidate is invalid. InvalidCandidate(String), + /// Instantiation of the WASM module instance failed during an execution. + /// Possibly related to local issues or dirty node update. May be retried with re-preparation. + RuntimeConstruction(String), /// The job timed out. JobTimedOut, /// The job process has died. We must kill the worker just in case. @@ -68,6 +71,9 @@ pub enum JobResponse { /// The result of parachain validation. result_descriptor: ValidationResult, }, + /// A possibly transient runtime instantiation error happened during the execution; may be + /// retried with re-preparation + RuntimeConstruction(String), /// The candidate is invalid. InvalidCandidate(String), } @@ -81,6 +87,15 @@ impl JobResponse { Self::InvalidCandidate(format!("{}: {}", ctx, msg)) } } + + /// Creates a may retry response from a context `ctx` and a message `msg` (which can be empty). + pub fn runtime_construction(ctx: &'static str, msg: &str) -> Self { + if msg.is_empty() { + Self::RuntimeConstruction(ctx.to_string()) + } else { + Self::RuntimeConstruction(format!("{}: {}", ctx, msg)) + } + } } /// An unexpected error occurred in the execution job process. Because this comes from the job, diff --git a/polkadot/node/core/pvf/common/src/executor_interface.rs b/polkadot/node/core/pvf/common/src/executor_interface.rs index 4cd2f5c85eec53661fde7625bb50a1d195381beb..b69a87890f6038199f98d5d7a5ab257c13125443 100644 --- a/polkadot/node/core/pvf/common/src/executor_interface.rs +++ b/polkadot/node/core/pvf/common/src/executor_interface.rs @@ -16,6 +16,7 @@ //! 
Interface to the Substrate Executor +use crate::error::ExecuteError; use polkadot_primitives::{ executor_params::{DEFAULT_LOGICAL_STACK_MAX, DEFAULT_NATIVE_STACK_MAX}, ExecutorParam, ExecutorParams, @@ -109,7 +110,7 @@ pub unsafe fn execute_artifact( compiled_artifact_blob: &[u8], executor_params: &ExecutorParams, params: &[u8], -) -> Result, String> { +) -> Result, ExecuteError> { let mut extensions = sp_externalities::Extensions::new(); extensions.register(sp_core::traits::ReadRuntimeVersionExt::new(ReadRuntimeVersion)); @@ -123,7 +124,6 @@ pub unsafe fn execute_artifact( Ok(Ok(ok)) => Ok(ok), Ok(Err(err)) | Err(err) => Err(err), } - .map_err(|err| format!("execute error: {:?}", err)) } /// Constructs the runtime for the given PVF, given the artifact bytes. diff --git a/polkadot/node/core/pvf/execute-worker/src/lib.rs b/polkadot/node/core/pvf/execute-worker/src/lib.rs index 0cfa5a786946c7428ea8954515fe03807a90da32..bd7e76010a6dfedb339fd2fc1c17aa7f4007babe 100644 --- a/polkadot/node/core/pvf/execute-worker/src/lib.rs +++ b/polkadot/node/core/pvf/execute-worker/src/lib.rs @@ -16,7 +16,9 @@ //! Contains the logic for executing PVFs. Used by the polkadot-execute-worker binary. -pub use polkadot_node_core_pvf_common::executor_interface::execute_artifact; +pub use polkadot_node_core_pvf_common::{ + error::ExecuteError, executor_interface::execute_artifact, +}; // NOTE: Initializing logging in e.g. tests will not have an effect in the workers, as they are // separate spawned processes. Run with e.g. `RUST_LOG=parachain::pvf-execute-worker=trace`. @@ -237,7 +239,9 @@ fn validate_using_artifact( // [`executor_interface::prepare`]. execute_artifact(compiled_artifact_blob, executor_params, params) } { - Err(err) => return JobResponse::format_invalid("execute", &err), + Err(ExecuteError::RuntimeConstruction(wasmerr)) => + return JobResponse::runtime_construction("execute", &wasmerr.to_string()), + Err(err) => return JobResponse::format_invalid("execute", &err.to_string()), Ok(d) => d, }; @@ -550,6 +554,8 @@ fn handle_parent_process( Ok(WorkerResponse::Ok { result_descriptor, duration: cpu_tv }) }, Ok(JobResponse::InvalidCandidate(err)) => Ok(WorkerResponse::InvalidCandidate(err)), + Ok(JobResponse::RuntimeConstruction(err)) => + Ok(WorkerResponse::RuntimeConstruction(err)), Err(job_error) => { gum::warn!( target: LOG_TARGET, diff --git a/polkadot/node/core/pvf/src/artifacts.rs b/polkadot/node/core/pvf/src/artifacts.rs index 78dfe71adaddcd8d917a639591c102e08ebb7e4b..6288755526d497812027f7bf04e11e5a67ab799e 100644 --- a/polkadot/node/core/pvf/src/artifacts.rs +++ b/polkadot/node/core/pvf/src/artifacts.rs @@ -238,6 +238,14 @@ impl Artifacts { .is_none()); } + /// Remove artifact by its id. + pub fn remove(&mut self, artifact_id: ArtifactId) -> Option<(ArtifactId, PathBuf)> { + self.inner.remove(&artifact_id).and_then(|state| match state { + ArtifactState::Prepared { path, .. } => Some((artifact_id, path)), + _ => None, + }) + } + /// Remove artifacts older than the given TTL and return id and path of the removed ones. pub fn prune(&mut self, artifact_ttl: Duration) -> Vec<(ArtifactId, PathBuf)> { let now = SystemTime::now(); diff --git a/polkadot/node/core/pvf/src/error.rs b/polkadot/node/core/pvf/src/error.rs index 80d41d5c64be1a5cec31aee084a0f536ad0380bf..8dc96305eadb8f3ced1231889f5fa6c5ffaeac57 100644 --- a/polkadot/node/core/pvf/src/error.rs +++ b/polkadot/node/core/pvf/src/error.rs @@ -86,6 +86,10 @@ pub enum PossiblyInvalidError { /// vote invalid. 
#[error("possibly invalid: job error: {0}")] JobError(String), + /// Instantiation of the WASM module instance failed during an execution. + /// Possibly related to local issues or dirty node update. May be retried with re-preparation. + #[error("possibly invalid: runtime construction: {0}")] + RuntimeConstruction(String), } impl From for ValidationError { diff --git a/polkadot/node/core/pvf/src/execute/mod.rs b/polkadot/node/core/pvf/src/execute/mod.rs index c6d9cf90fa289601f89b0aad307483a002f89427..365e98196cae29dcb9c5085708cfaf7ee8b26238 100644 --- a/polkadot/node/core/pvf/src/execute/mod.rs +++ b/polkadot/node/core/pvf/src/execute/mod.rs @@ -23,4 +23,4 @@ mod queue; mod worker_interface; -pub use queue::{start, PendingExecutionRequest, ToQueue}; +pub use queue::{start, FromQueue, PendingExecutionRequest, ToQueue}; diff --git a/polkadot/node/core/pvf/src/execute/queue.rs b/polkadot/node/core/pvf/src/execute/queue.rs index aa91d11781fc625993484fa64ff8ebca4d65aeb2..bdc3c7327b06079eaaeebf737ffa7ca2c8c8270c 100644 --- a/polkadot/node/core/pvf/src/execute/queue.rs +++ b/polkadot/node/core/pvf/src/execute/queue.rs @@ -25,7 +25,7 @@ use crate::{ InvalidCandidate, PossiblyInvalidError, ValidationError, LOG_TARGET, }; use futures::{ - channel::mpsc, + channel::{mpsc, oneshot}, future::BoxFuture, stream::{FuturesUnordered, StreamExt as _}, Future, FutureExt, @@ -54,6 +54,12 @@ pub enum ToQueue { Enqueue { artifact: ArtifactPathId, pending_execution_request: PendingExecutionRequest }, } +/// A response from queue. +#[derive(Debug)] +pub enum FromQueue { + RemoveArtifact { artifact: ArtifactId, reply_to: oneshot::Sender<()> }, +} + /// An execution request that should execute the PVF (known in the context) and send the results /// to the given result sender. #[derive(Debug)] @@ -137,6 +143,8 @@ struct Queue { /// The receiver that receives messages to the pool. to_queue_rx: mpsc::Receiver, + /// The sender to send messages back to validation host. + from_queue_tx: mpsc::UnboundedSender, // Some variables related to the current session. program_path: PathBuf, @@ -161,6 +169,7 @@ impl Queue { node_version: Option, security_status: SecurityStatus, to_queue_rx: mpsc::Receiver, + from_queue_tx: mpsc::UnboundedSender, ) -> Self { Self { metrics, @@ -170,6 +179,7 @@ impl Queue { node_version, security_status, to_queue_rx, + from_queue_tx, queue: VecDeque::new(), mux: Mux::new(), workers: Workers { @@ -301,7 +311,7 @@ async fn handle_mux(queue: &mut Queue, event: QueueEvent) { handle_worker_spawned(queue, idle, handle, job); }, QueueEvent::StartWork(worker, outcome, artifact_id, result_tx) => { - handle_job_finish(queue, worker, outcome, artifact_id, result_tx); + handle_job_finish(queue, worker, outcome, artifact_id, result_tx).await; }, } } @@ -327,42 +337,69 @@ fn handle_worker_spawned( /// If there are pending jobs in the queue, schedules the next of them onto the just freed up /// worker. Otherwise, puts back into the available workers list. 
-fn handle_job_finish( +async fn handle_job_finish( queue: &mut Queue, worker: Worker, outcome: Outcome, artifact_id: ArtifactId, result_tx: ResultSender, ) { - let (idle_worker, result, duration) = match outcome { + let (idle_worker, result, duration, sync_channel) = match outcome { Outcome::Ok { result_descriptor, duration, idle_worker } => { // TODO: propagate the soft timeout - (Some(idle_worker), Ok(result_descriptor), Some(duration)) + (Some(idle_worker), Ok(result_descriptor), Some(duration), None) }, Outcome::InvalidCandidate { err, idle_worker } => ( Some(idle_worker), Err(ValidationError::Invalid(InvalidCandidate::WorkerReportedInvalid(err))), None, + None, ), - Outcome::InternalError { err } => (None, Err(ValidationError::Internal(err)), None), + Outcome::RuntimeConstruction { err, idle_worker } => { + // The task for artifact removal is executed concurrently with + // the message to the host on the execution result. + let (result_tx, result_rx) = oneshot::channel(); + queue + .from_queue_tx + .unbounded_send(FromQueue::RemoveArtifact { + artifact: artifact_id.clone(), + reply_to: result_tx, + }) + .expect("from execute queue receiver is listened by the host; qed"); + ( + Some(idle_worker), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::RuntimeConstruction( + err, + ))), + None, + Some(result_rx), + ) + }, + Outcome::InternalError { err } => (None, Err(ValidationError::Internal(err)), None, None), // Either the worker or the job timed out. Kill the worker in either case. Treated as // definitely-invalid, because if we timed out, there's no time left for a retry. Outcome::HardTimeout => - (None, Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)), None), + (None, Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)), None, None), // "Maybe invalid" errors (will retry). Outcome::WorkerIntfErr => ( None, Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), None, + None, ), Outcome::JobDied { err } => ( None, Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousJobDeath(err))), None, + None, + ), + Outcome::JobError { err } => ( + None, + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError(err))), + None, + None, ), - Outcome::JobError { err } => - (None, Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError(err))), None), }; queue.metrics.execute_finished(); @@ -386,6 +423,12 @@ fn handle_job_finish( ); } + if let Some(sync_channel) = sync_channel { + // err means the sender is dropped (the artifact is already removed from the cache) + // so that's legitimate to ignore the result + let _ = sync_channel.await; + } + // First we send the result. It may fail due to the other end of the channel being dropped, // that's legitimate and we don't treat that as an error. 
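// A minimal, self-contained sketch (illustrative only, with placeholder types) of the
// remove-and-confirm handshake used above: the execute queue asks the host to drop an
// artifact via an unbounded channel and only reports the execution result once the host
// has acknowledged the removal over a oneshot channel. `String` stands in for `ArtifactId`.
use futures::channel::{mpsc, oneshot};
use futures::StreamExt;

async fn remove_and_confirm_demo() {
    // Queue -> host: (artifact id, reply channel).
    let (to_host, mut from_queue) = mpsc::unbounded::<(String, oneshot::Sender<()>)>();

    // Queue side: request the removal and keep the confirmation receiver.
    let (reply_to, confirmed) = oneshot::channel();
    to_host
        .unbounded_send(("artifact-id".to_string(), reply_to))
        .expect("host end is alive in this demo");

    // Host side: remove the artifact from its in-memory table, then confirm.
    if let Some((_artifact_id, reply)) = from_queue.next().await {
        // ... remove the artifact and hand its path to the sweeper here ...
        let _ = reply.send(());
    }

    // Queue side: a dropped sender just means the artifact was already gone, so the
    // error case can be ignored, mirroring the `let _ = sync_channel.await` above.
    let _ = confirmed.await;
}

fn main() {
    futures::executor::block_on(remove_and_confirm_demo());
}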
let _ = result_tx.send(result); @@ -521,8 +564,10 @@ pub fn start( spawn_timeout: Duration, node_version: Option, security_status: SecurityStatus, -) -> (mpsc::Sender, impl Future) { +) -> (mpsc::Sender, mpsc::UnboundedReceiver, impl Future) { let (to_queue_tx, to_queue_rx) = mpsc::channel(20); + let (from_queue_tx, from_queue_rx) = mpsc::unbounded(); + let run = Queue::new( metrics, program_path, @@ -532,7 +577,8 @@ pub fn start( node_version, security_status, to_queue_rx, + from_queue_tx, ) .run(); - (to_queue_tx, run) + (to_queue_tx, from_queue_rx, run) } diff --git a/polkadot/node/core/pvf/src/execute/worker_interface.rs b/polkadot/node/core/pvf/src/execute/worker_interface.rs index 9f7738f00e699ab981d7fa4396fcd09d5e1a4abe..db81da118d7b5563ea7b4bec1b6c76bd3bf842e2 100644 --- a/polkadot/node/core/pvf/src/execute/worker_interface.rs +++ b/polkadot/node/core/pvf/src/execute/worker_interface.rs @@ -87,6 +87,10 @@ pub enum Outcome { /// a trap. Errors related to the preparation process are not expected to be encountered by the /// execution workers. InvalidCandidate { err: String, idle_worker: IdleWorker }, + /// The error is probably transient. It may be for example + /// because the artifact was prepared with a Wasmtime version different from the version + /// in the current execution environment. + RuntimeConstruction { err: String, idle_worker: IdleWorker }, /// The execution time exceeded the hard limit. The worker is terminated. HardTimeout, /// An I/O error happened during communication with the worker. This may mean that the worker @@ -193,6 +197,10 @@ pub async fn start_work( err, idle_worker: IdleWorker { stream, pid, worker_dir }, }, + WorkerResponse::RuntimeConstruction(err) => Outcome::RuntimeConstruction { + err, + idle_worker: IdleWorker { stream, pid, worker_dir }, + }, WorkerResponse::JobTimedOut => Outcome::HardTimeout, WorkerResponse::JobDied { err, job_pid: _ } => Outcome::JobDied { err }, WorkerResponse::JobError(err) => Outcome::JobError { err }, diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs index ae9fdc7d2dea81ac85c8c4ad752fca6fbf9a6f55..8ec46f4b08f193082f1fcf8f6a08cae2ef261765 100644 --- a/polkadot/node/core/pvf/src/host.rs +++ b/polkadot/node/core/pvf/src/host.rs @@ -274,7 +274,7 @@ pub async fn start( from_prepare_pool, ); - let (to_execute_queue_tx, run_execute_queue) = execute::start( + let (to_execute_queue_tx, from_execute_queue_rx, run_execute_queue) = execute::start( metrics, config.execute_worker_program_path.to_owned(), config.cache_path.clone(), @@ -296,6 +296,7 @@ pub async fn start( to_prepare_queue_tx, from_prepare_queue_rx, to_execute_queue_tx, + from_execute_queue_rx, to_sweeper_tx, awaiting_prepare: AwaitingPrepare::default(), }) @@ -342,6 +343,8 @@ struct Inner { from_prepare_queue_rx: mpsc::UnboundedReceiver, to_execute_queue_tx: mpsc::Sender, + from_execute_queue_rx: mpsc::UnboundedReceiver, + to_sweeper_tx: mpsc::Sender, awaiting_prepare: AwaitingPrepare, @@ -358,6 +361,7 @@ async fn run( to_host_rx, from_prepare_queue_rx, mut to_prepare_queue_tx, + from_execute_queue_rx, mut to_execute_queue_tx, mut to_sweeper_tx, mut awaiting_prepare, @@ -384,10 +388,21 @@ async fn run( let mut to_host_rx = to_host_rx.fuse(); let mut from_prepare_queue_rx = from_prepare_queue_rx.fuse(); + let mut from_execute_queue_rx = from_execute_queue_rx.fuse(); loop { // biased to make it behave deterministically for tests. futures::select_biased! 
{ + from_execute_queue_rx = from_execute_queue_rx.next() => { + let from_queue = break_if_fatal!(from_execute_queue_rx.ok_or(Fatal)); + let execute::FromQueue::RemoveArtifact { artifact, reply_to } = from_queue; + break_if_fatal!(handle_artifact_removal( + &mut to_sweeper_tx, + &mut artifacts, + artifact, + reply_to, + ).await); + }, () = cleanup_pulse.select_next_some() => { // `select_next_some` because we don't expect this to fail, but if it does, we // still don't fail. The trade-off is that the compiled cache will start growing @@ -861,6 +876,37 @@ async fn handle_cleanup_pulse( Ok(()) } +async fn handle_artifact_removal( + sweeper_tx: &mut mpsc::Sender, + artifacts: &mut Artifacts, + artifact_id: ArtifactId, + reply_to: oneshot::Sender<()>, +) -> Result<(), Fatal> { + let (artifact_id, path) = if let Some(artifact) = artifacts.remove(artifact_id) { + artifact + } else { + // if we haven't found the artifact by its id, + // it has been probably removed + // anyway with the randomness of the artifact name + // it is safe to ignore + return Ok(()); + }; + reply_to + .send(()) + .expect("the execute queue waits for the artifact remove confirmation; qed"); + // Thanks to the randomness of the artifact name (see + // `artifacts::generate_artifact_path`) there is no issue with any name conflict on + // future repreparation. + // So we can confirm the artifact removal already + gum::debug!( + target: LOG_TARGET, + validation_code_hash = ?artifact_id.code_hash, + "PVF pruning: pruning artifact by request from the execute queue", + ); + sweeper_tx.send(path).await.map_err(|_| Fatal)?; + Ok(()) +} + /// A simple task which sole purpose is to delete files thrown at it. async fn sweeper_task(mut sweeper_rx: mpsc::Receiver) { loop { @@ -968,6 +1014,8 @@ pub(crate) mod tests { to_prepare_queue_rx: mpsc::Receiver, from_prepare_queue_tx: mpsc::UnboundedSender, to_execute_queue_rx: mpsc::Receiver, + #[allow(unused)] + from_execute_queue_tx: mpsc::UnboundedSender, to_sweeper_rx: mpsc::Receiver, run: BoxFuture<'static, ()>, @@ -979,6 +1027,7 @@ pub(crate) mod tests { let (to_prepare_queue_tx, to_prepare_queue_rx) = mpsc::channel(10); let (from_prepare_queue_tx, from_prepare_queue_rx) = mpsc::unbounded(); let (to_execute_queue_tx, to_execute_queue_rx) = mpsc::channel(10); + let (from_execute_queue_tx, from_execute_queue_rx) = mpsc::unbounded(); let (to_sweeper_tx, to_sweeper_rx) = mpsc::channel(10); let run = run(Inner { @@ -989,6 +1038,7 @@ pub(crate) mod tests { to_prepare_queue_tx, from_prepare_queue_rx, to_execute_queue_tx, + from_execute_queue_rx, to_sweeper_tx, awaiting_prepare: AwaitingPrepare::default(), }) @@ -999,6 +1049,7 @@ pub(crate) mod tests { to_prepare_queue_rx, from_prepare_queue_tx, to_execute_queue_rx, + from_execute_queue_tx, to_sweeper_rx, run, } diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs index bcc10749e746d05479997e161329bbc6b4c468ff..cdfbcd8e57855121a7128a0ed616df4f8ecb0cbd 100644 --- a/polkadot/node/core/pvf/tests/it/main.rs +++ b/polkadot/node/core/pvf/tests/it/main.rs @@ -21,13 +21,14 @@ use parity_scale_codec::Encode as _; #[cfg(all(feature = "ci-only-tests", target_os = "linux"))] use polkadot_node_core_pvf::SecurityStatus; use polkadot_node_core_pvf::{ - start, testing::build_workers_and_get_paths, Config, InvalidCandidate, Metrics, PrepareError, - PrepareJobKind, PvfPrepData, ValidationError, ValidationHost, JOB_TIMEOUT_WALL_CLOCK_FACTOR, + start, testing::build_workers_and_get_paths, Config, InvalidCandidate, Metrics, + 
PossiblyInvalidError, PrepareError, PrepareJobKind, PvfPrepData, ValidationError, + ValidationHost, JOB_TIMEOUT_WALL_CLOCK_FACTOR, }; use polkadot_parachain_primitives::primitives::{BlockData, ValidationParams, ValidationResult}; use polkadot_primitives::{ExecutorParam, ExecutorParams}; -use std::time::Duration; +use std::{io::Write, time::Duration}; use tokio::sync::Mutex; mod adder; @@ -352,10 +353,80 @@ async fn deleting_prepared_artifact_does_not_dispute() { ) .await; - match result { - Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)) => {}, - r => panic!("{:?}", r), + assert_matches!(result, Err(ValidationError::Invalid(InvalidCandidate::HardTimeout))); +} + +// Test that corruption of a prepared artifact does not lead to a dispute when we try to execute it. +#[tokio::test] +async fn corrupted_prepared_artifact_does_not_dispute() { + let host = TestHost::new().await; + let cache_dir = host.cache_dir.path(); + + let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), Default::default()).await.unwrap(); + + // Manually corrupting the prepared artifact from disk. The in-memory artifacts table won't + // change. + let artifact_path = { + // Get the artifact path (asserting it exists). + let mut cache_dir: Vec<_> = std::fs::read_dir(cache_dir).unwrap().collect(); + // Should contain the artifact and the worker dir. + assert_eq!(cache_dir.len(), 2); + let mut artifact_path = cache_dir.pop().unwrap().unwrap(); + if artifact_path.path().is_dir() { + artifact_path = cache_dir.pop().unwrap().unwrap(); + } + + // Corrupt the artifact. + let mut f = std::fs::OpenOptions::new() + .write(true) + .truncate(true) + .open(artifact_path.path()) + .unwrap(); + f.write_all(b"corrupted wasm").unwrap(); + f.flush().unwrap(); + artifact_path + }; + + assert!(artifact_path.path().exists()); + + // Try to validate, artifact should get removed because of the corruption. 
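// Illustrative helper (not part of this patch): the test below inlines an equivalent
// polling loop, waiting until the host's sweeper has actually deleted the corrupted
// artifact, since the removal happens concurrently with reporting the execution result.
async fn wait_until_removed(path: &std::path::Path, timeout: std::time::Duration) -> bool {
    let started = std::time::Instant::now();
    while path.exists() {
        if started.elapsed() >= timeout {
            return false;
        }
        tokio::time::sleep(std::time::Duration::from_millis(200)).await;
    }
    true
}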
+ let result = host + .validate_candidate( + halt::wasm_binary_unwrap(), + ValidationParams { + block_data: BlockData(Vec::new()), + parent_head: Default::default(), + relay_parent_number: 1, + relay_parent_storage_root: Default::default(), + }, + Default::default(), + ) + .await; + + assert_matches!( + result, + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::RuntimeConstruction(_))) + ); + + // because of RuntimeConstruction we may retry + host.precheck_pvf(halt::wasm_binary_unwrap(), Default::default()).await.unwrap(); + + // The actual artifact removal is done concurrently + // with sending of the result of the execution + // it is not a problem for further re-preparation as + // artifact filenames are random + for _ in 1..5 { + if !artifact_path.path().exists() { + break; + } + tokio::time::sleep(Duration::from_secs(1)).await; } + + assert!( + !artifact_path.path().exists(), + "the corrupted artifact ({}) should be deleted by the host", + artifact_path.path().display() + ); } #[tokio::test] diff --git a/polkadot/node/malus/src/variants/back_garbage_candidate.rs b/polkadot/node/malus/src/variants/back_garbage_candidate.rs index 82475d291422582f5197613d4f347baaf203b929..b939a2151e2359828da74d317c9e2bc0af2c6e0c 100644 --- a/polkadot/node/malus/src/variants/back_garbage_candidate.rs +++ b/polkadot/node/malus/src/variants/back_garbage_candidate.rs @@ -20,14 +20,13 @@ use polkadot_cli::{ service::{ - AuthorityDiscoveryApi, AuxStore, BabeApi, Block, Error, ExtendedOverseerGenArgs, - HeaderBackend, Overseer, OverseerConnector, OverseerGen, OverseerGenArgs, OverseerHandle, - ParachainHost, ProvideRuntimeApi, + AuxStore, Error, ExtendedOverseerGenArgs, Overseer, OverseerConnector, OverseerGen, + OverseerGenArgs, OverseerHandle, }, validator_overseer_builder, Cli, }; use polkadot_node_subsystem::SpawnGlue; -use polkadot_node_subsystem_types::DefaultSubsystemClient; +use polkadot_node_subsystem_types::{ChainApiBackend, RuntimeApiSubsystemClient}; use sp_core::traits::SpawnNamed; use crate::{ @@ -63,13 +62,9 @@ impl OverseerGen for BackGarbageCandidates { connector: OverseerConnector, args: OverseerGenArgs<'_, Spawner, RuntimeClient>, ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { let spawner = args.spawner.clone(); diff --git a/polkadot/node/malus/src/variants/common.rs b/polkadot/node/malus/src/variants/common.rs index 011fcc80e37331909434ccd2fa1bfa1e96f48f35..eb6988f81811687442a36ae3f8ec225de48dd9df 100644 --- a/polkadot/node/malus/src/variants/common.rs +++ b/polkadot/node/malus/src/variants/common.rs @@ -23,10 +23,6 @@ use crate::{ use polkadot_node_core_candidate_validation::find_validation_data; use polkadot_node_primitives::{InvalidCandidate, ValidationResult}; -use polkadot_node_subsystem::{ - messages::{CandidateValidationMessage, ValidationFailed}, - overseer, -}; use polkadot_primitives::{ CandidateCommitments, CandidateDescriptor, CandidateReceipt, PersistedValidationData, diff --git a/polkadot/node/malus/src/variants/dispute_finalized_candidates.rs b/polkadot/node/malus/src/variants/dispute_finalized_candidates.rs index b3555ba2f5beb3e45285bc443dee9a4d029b070b..7a95bdaead26e204bd4cd8b386ae02b5e12297f9 
100644 --- a/polkadot/node/malus/src/variants/dispute_finalized_candidates.rs +++ b/polkadot/node/malus/src/variants/dispute_finalized_candidates.rs @@ -34,14 +34,13 @@ use futures::channel::oneshot; use polkadot_cli::{ service::{ - AuthorityDiscoveryApi, AuxStore, BabeApi, Block, Error, ExtendedOverseerGenArgs, - HeaderBackend, Overseer, OverseerConnector, OverseerGen, OverseerGenArgs, OverseerHandle, - ParachainHost, ProvideRuntimeApi, + AuxStore, Error, ExtendedOverseerGenArgs, Overseer, OverseerConnector, OverseerGen, + OverseerGenArgs, OverseerHandle, }, validator_overseer_builder, Cli, }; -use polkadot_node_subsystem::{messages::ApprovalVotingMessage, SpawnGlue}; -use polkadot_node_subsystem_types::{DefaultSubsystemClient, OverseerSignal}; +use polkadot_node_subsystem::SpawnGlue; +use polkadot_node_subsystem_types::{ChainApiBackend, OverseerSignal, RuntimeApiSubsystemClient}; use polkadot_node_subsystem_util::request_candidate_events; use polkadot_primitives::CandidateEvent; use sp_core::traits::SpawnNamed; @@ -237,13 +236,9 @@ impl OverseerGen for DisputeFinalizedCandidates { connector: OverseerConnector, args: OverseerGenArgs<'_, Spawner, RuntimeClient>, ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { gum::info!( diff --git a/polkadot/node/malus/src/variants/dispute_valid_candidates.rs b/polkadot/node/malus/src/variants/dispute_valid_candidates.rs index b9812cbb5012a4ff97e3f1ed146538147efda03b..a50fdce16e4ec41d6b9bea4f0190616f756d9193 100644 --- a/polkadot/node/malus/src/variants/dispute_valid_candidates.rs +++ b/polkadot/node/malus/src/variants/dispute_valid_candidates.rs @@ -24,14 +24,13 @@ use polkadot_cli::{ service::{ - AuthorityDiscoveryApi, AuxStore, BabeApi, Block, Error, ExtendedOverseerGenArgs, - HeaderBackend, Overseer, OverseerConnector, OverseerGen, OverseerGenArgs, OverseerHandle, - ParachainHost, ProvideRuntimeApi, + AuxStore, Error, ExtendedOverseerGenArgs, Overseer, OverseerConnector, OverseerGen, + OverseerGenArgs, OverseerHandle, }, validator_overseer_builder, Cli, }; use polkadot_node_subsystem::SpawnGlue; -use polkadot_node_subsystem_types::DefaultSubsystemClient; +use polkadot_node_subsystem_types::{ChainApiBackend, RuntimeApiSubsystemClient}; use sp_core::traits::SpawnNamed; // Filter wrapping related types. 
@@ -80,13 +79,9 @@ impl OverseerGen for DisputeValidCandidates { connector: OverseerConnector, args: OverseerGenArgs<'_, Spawner, RuntimeClient>, ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { let spawner = args.spawner.clone(); diff --git a/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs b/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs index 22b44ddd1dc359620b199118bebf0bb83b69dada..739ed40db362a2ae330f14b7a24cc77c1ab76159 100644 --- a/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs +++ b/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs @@ -25,14 +25,13 @@ use futures::channel::oneshot; use polkadot_cli::{ service::{ - AuthorityDiscoveryApi, AuxStore, BabeApi, Block, Error, ExtendedOverseerGenArgs, - HeaderBackend, Overseer, OverseerConnector, OverseerGen, OverseerGenArgs, OverseerHandle, - ParachainHost, ProvideRuntimeApi, + AuxStore, Error, ExtendedOverseerGenArgs, Overseer, OverseerConnector, OverseerGen, + OverseerGenArgs, OverseerHandle, }, validator_overseer_builder, Cli, }; use polkadot_node_primitives::{AvailableData, BlockData, PoV}; -use polkadot_node_subsystem_types::DefaultSubsystemClient; +use polkadot_node_subsystem_types::{ChainApiBackend, RuntimeApiSubsystemClient}; use polkadot_primitives::{CandidateDescriptor, CandidateReceipt}; use polkadot_node_subsystem_util::request_validators; @@ -52,7 +51,7 @@ use crate::{ // Import extra types relevant to the particular // subsystem. 
-use polkadot_node_subsystem::{messages::CandidateBackingMessage, SpawnGlue}; +use polkadot_node_subsystem::SpawnGlue; use std::sync::Arc; @@ -296,13 +295,9 @@ impl OverseerGen for SuggestGarbageCandidates { connector: OverseerConnector, args: OverseerGenArgs<'_, Spawner, RuntimeClient>, ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { gum::info!( diff --git a/polkadot/node/malus/src/variants/support_disabled.rs b/polkadot/node/malus/src/variants/support_disabled.rs index e25df56fd643cdde44b861973751b1cd23e25480..169c442db25b7426f44cdfbe2c4181233802a914 100644 --- a/polkadot/node/malus/src/variants/support_disabled.rs +++ b/polkadot/node/malus/src/variants/support_disabled.rs @@ -19,14 +19,13 @@ use polkadot_cli::{ service::{ - AuthorityDiscoveryApi, AuxStore, BabeApi, Block, Error, ExtendedOverseerGenArgs, - HeaderBackend, Overseer, OverseerConnector, OverseerGen, OverseerGenArgs, OverseerHandle, - ParachainHost, ProvideRuntimeApi, + AuxStore, Error, ExtendedOverseerGenArgs, Overseer, OverseerConnector, OverseerGen, + OverseerGenArgs, OverseerHandle, }, validator_overseer_builder, Cli, }; use polkadot_node_subsystem::SpawnGlue; -use polkadot_node_subsystem_types::DefaultSubsystemClient; +use polkadot_node_subsystem_types::{ChainApiBackend, RuntimeApiSubsystemClient}; use sp_core::traits::SpawnNamed; use crate::interceptor::*; @@ -50,13 +49,9 @@ impl OverseerGen for SupportDisabled { connector: OverseerConnector, args: OverseerGenArgs<'_, Spawner, RuntimeClient>, ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { validator_overseer_builder( diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml index 432501ed23fbbca5e0ea49c82b8ac19efa5bd473..182d92cb163179eea85cafeb1c605763ac18cde1 100644 --- a/polkadot/node/network/availability-distribution/Cargo.toml +++ b/polkadot/node/network/availability-distribution/Cargo.toml @@ -36,3 +36,14 @@ sc-network = { path = "../../../../substrate/client/network" } futures-timer = "3.0.2" assert_matches = "1.4.0" polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +polkadot-subsystem-bench = { path = "../../subsystem-bench" } + + +[[test]] +name = "availability-distribution-regression-bench" +path = "tests/availability-distribution-regression-bench.rs" +harness = false +required-features = ["subsystem-benchmarks"] + +[features] +subsystem-benchmarks = [] diff --git a/polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs b/polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs new file mode 100644 index 0000000000000000000000000000000000000000..f2872f3c72ba47dafa87dadefce96cf35b97646f --- /dev/null +++ 
b/polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs @@ -0,0 +1,113 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! availability-read regression tests +//! +//! TODO: Explain the test case after configuration adjusted to Kusama +//! +//! Subsystems involved: +//! - availability-distribution +//! - bitfield-distribution +//! - availability-store + +use polkadot_subsystem_bench::{ + availability::{benchmark_availability_write, prepare_test, TestDataAvailability, TestState}, + configuration::{PeerLatency, TestConfiguration}, + usage::BenchmarkUsage, +}; + +const BENCH_COUNT: usize = 3; +const WARM_UP_COUNT: usize = 20; +const WARM_UP_PRECISION: f64 = 0.01; + +fn main() -> Result<(), String> { + let mut messages = vec![]; + + // TODO: Adjust the test configurations to Kusama values + let mut config = TestConfiguration::default(); + config.latency = Some(PeerLatency { mean_latency_ms: 30, std_dev: 2.0 }); + config.n_validators = 1000; + config.n_cores = 200; + config.max_validators_per_core = 5; + config.min_pov_size = 5120; + config.max_pov_size = 5120; + config.peer_bandwidth = 52428800; + config.bandwidth = 52428800; + config.connectivity = 75; + config.num_blocks = 3; + config.generate_pov_sizes(); + + warm_up(config.clone())?; + let usage = benchmark(config.clone()); + + messages.extend(usage.check_network_usage(&[ + ("Received from peers", 4330.0, 0.05), + ("Sent to peers", 15900.0, 0.05), + ])); + messages.extend(usage.check_cpu_usage(&[ + ("availability-distribution", 0.025, 0.05), + ("bitfield-distribution", 0.085, 0.05), + ("availability-store", 0.180, 0.05), + ])); + + if messages.is_empty() { + Ok(()) + } else { + eprintln!("{}", messages.join("\n")); + Err("Regressions found".to_string()) + } +} + +fn warm_up(config: TestConfiguration) -> Result<(), String> { + println!("Warming up..."); + let mut prev_run: Option = None; + for _ in 0..WARM_UP_COUNT { + let curr = run(config.clone()); + if let Some(ref prev) = prev_run { + let av_distr_diff = + curr.cpu_usage_diff(prev, "availability-distribution").expect("Must exist"); + let bitf_distr_diff = + curr.cpu_usage_diff(prev, "bitfield-distribution").expect("Must exist"); + let av_store_diff = + curr.cpu_usage_diff(prev, "availability-store").expect("Must exist"); + if av_distr_diff < WARM_UP_PRECISION && + bitf_distr_diff < WARM_UP_PRECISION && + av_store_diff < WARM_UP_PRECISION + { + return Ok(()) + } + } + prev_run = Some(curr); + } + + Err("Can't warm up".to_string()) +} + +fn benchmark(config: TestConfiguration) -> BenchmarkUsage { + println!("Benchmarking..."); + let usages: Vec = (0..BENCH_COUNT).map(|_| run(config.clone())).collect(); + let usage = BenchmarkUsage::average(&usages); + println!("{}", usage); + usage +} + +fn run(config: TestConfiguration) -> BenchmarkUsage { + let mut state = TestState::new(&config); + let (mut env, 
_protocol_config) = + prepare_test(config.clone(), &mut state, TestDataAvailability::Write, false); + env.runtime() + .block_on(benchmark_availability_write("data_availability_write", &mut env, state)) +} diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml index 9eddf5c86d2ed905888cf5e5fad4dd200fdbee08..12b6ce7a05715480bc6ca79d67742bc089b5f67c 100644 --- a/polkadot/node/network/availability-recovery/Cargo.toml +++ b/polkadot/node/network/availability-recovery/Cargo.toml @@ -41,6 +41,13 @@ sc-network = { path = "../../../../substrate/client/network" } polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +polkadot-subsystem-bench = { path = "../../subsystem-bench" } + +[[test]] +name = "availability-recovery-regression-bench" +path = "tests/availability-recovery-regression-bench.rs" +harness = false +required-features = ["subsystem-benchmarks"] [features] subsystem-benchmarks = [] diff --git a/polkadot/node/network/availability-recovery/tests/availability-recovery-regression-bench.rs b/polkadot/node/network/availability-recovery/tests/availability-recovery-regression-bench.rs new file mode 100644 index 0000000000000000000000000000000000000000..beb063e7ae0ddd2c158667287a4800509cbbdc68 --- /dev/null +++ b/polkadot/node/network/availability-recovery/tests/availability-recovery-regression-bench.rs @@ -0,0 +1,103 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! availability-write regression tests +//! +//! TODO: Explain the test case after configuration adjusted to Kusama +//! +//! Subsystems involved: +//! 
- availability-recovery + +use polkadot_subsystem_bench::{ + availability::{ + benchmark_availability_read, prepare_test, DataAvailabilityReadOptions, + TestDataAvailability, TestState, + }, + configuration::{PeerLatency, TestConfiguration}, + usage::BenchmarkUsage, +}; + +const BENCH_COUNT: usize = 3; +const WARM_UP_COUNT: usize = 10; +const WARM_UP_PRECISION: f64 = 0.01; + +fn main() -> Result<(), String> { + let mut messages = vec![]; + + // TODO: Adjust the test configurations to Kusama values + let options = DataAvailabilityReadOptions { fetch_from_backers: true }; + let mut config = TestConfiguration::default(); + config.latency = Some(PeerLatency { mean_latency_ms: 100, std_dev: 1.0 }); + config.n_validators = 300; + config.n_cores = 20; + config.min_pov_size = 5120; + config.max_pov_size = 5120; + config.peer_bandwidth = 52428800; + config.bandwidth = 52428800; + config.num_blocks = 3; + config.connectivity = 90; + config.generate_pov_sizes(); + + warm_up(config.clone(), options.clone())?; + let usage = benchmark(config.clone(), options.clone()); + + messages.extend(usage.check_network_usage(&[ + ("Received from peers", 102400.000, 0.05), + ("Sent to peers", 0.335, 0.05), + ])); + messages.extend(usage.check_cpu_usage(&[("availability-recovery", 3.850, 0.05)])); + + if messages.is_empty() { + Ok(()) + } else { + eprintln!("{}", messages.join("\n")); + Err("Regressions found".to_string()) + } +} + +fn warm_up(config: TestConfiguration, options: DataAvailabilityReadOptions) -> Result<(), String> { + println!("Warming up..."); + let mut prev_run: Option = None; + for _ in 0..WARM_UP_COUNT { + let curr = run(config.clone(), options.clone()); + if let Some(ref prev) = prev_run { + let diff = curr.cpu_usage_diff(prev, "availability-recovery").expect("Must exist"); + if diff < WARM_UP_PRECISION { + return Ok(()) + } + } + prev_run = Some(curr); + } + + Err("Can't warm up".to_string()) +} + +fn benchmark(config: TestConfiguration, options: DataAvailabilityReadOptions) -> BenchmarkUsage { + println!("Benchmarking..."); + let usages: Vec = + (0..BENCH_COUNT).map(|_| run(config.clone(), options.clone())).collect(); + let usage = BenchmarkUsage::average(&usages); + println!("{}", usage); + usage +} + +fn run(config: TestConfiguration, options: DataAvailabilityReadOptions) -> BenchmarkUsage { + let mut state = TestState::new(&config); + let (mut env, _protocol_config) = + prepare_test(config.clone(), &mut state, TestDataAvailability::Read(options), false); + env.runtime() + .block_on(benchmark_availability_read("data_availability_read", &mut env, state)) +} diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index 6e3eefbcbe8c7bd6029c44e2cc43a2c30ea834f3..d295c21cce1dc9f609c8068fb806cff69612f7d6 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -58,7 +58,7 @@ pub use disputes::{ /// relatively rare. /// /// The associated worker binaries should use the same version as the node that spawns them. 
-pub const NODE_VERSION: &'static str = "1.7.0"; +pub const NODE_VERSION: &'static str = "1.8.0"; // For a 16-ary Merkle Prefix Trie, we can expect at most 16 32-byte hashes per node // plus some overhead: diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index 8fd9f20b7bcfbde6a811fb7a4334242d9a1eb54f..180de70ad6c5c1a37cfbec46ff798b31c281083b 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -93,6 +93,7 @@ kvdb-rocksdb = { version = "0.19.0", optional = true } parity-db = { version = "0.4.12", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1" } parking_lot = "0.12.1" +bitvec = { version = "1.0.1", optional = true } # Polkadot polkadot-core-primitives = { path = "../../core-primitives" } @@ -184,8 +185,8 @@ full-node = [ ] # Configure the native runtimes to use. -westend-native = ["westend-runtime", "westend-runtime-constants"] -rococo-native = ["rococo-runtime", "rococo-runtime-constants"] +westend-native = ["bitvec", "westend-runtime", "westend-runtime-constants"] +rococo-native = ["bitvec", "rococo-runtime", "rococo-runtime-constants"] runtime-benchmarks = [ "frame-benchmarking-cli/runtime-benchmarks", @@ -195,6 +196,7 @@ runtime-benchmarks = [ "pallet-babe/runtime-benchmarks", "pallet-im-online/runtime-benchmarks", "pallet-staking/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", "polkadot-runtime-parachains/runtime-benchmarks", diff --git a/polkadot/node/service/chain-specs/kusama.json b/polkadot/node/service/chain-specs/kusama.json index 979550c7570643380c5a2e0f0f5de7c049c01f85..fd60cb8b6c1d443d47a85afcb7b885b4cf0593ae 100644 --- a/polkadot/node/service/chain-specs/kusama.json +++ b/polkadot/node/service/chain-specs/kusama.json @@ -16,8 +16,8 @@ "/dns/boot-node.helikon.io/tcp/7062/wss/p2p/12D3KooWL4KPqfAsPE2aY1g5Zo1CxsDwcdJ7mmAghK7cg6M2fdbD", "/dns/kusama.bootnode.amforc.com/tcp/30333/p2p/12D3KooWLx6nsj6Fpd8biP1VDyuCUjazvRiGWyBam8PsqRJkbUb9", "/dns/kusama.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWLx6nsj6Fpd8biP1VDyuCUjazvRiGWyBam8PsqRJkbUb9", - "/dns/kusama-bootnode.polkadotters.com/tcp/30333/p2p/12D3KooWHB5rTeNkQdXNJ9ynvGz8Lpnmsctt7Tvp7mrYv6bcwbPG", - "/dns/kusama-bootnode.polkadotters.com/tcp/30334/wss/p2p/12D3KooWHB5rTeNkQdXNJ9ynvGz8Lpnmsctt7Tvp7mrYv6bcwbPG", + "/dns/kusama.bootnodes.polkadotters.com/tcp/30311/p2p/12D3KooWHB5rTeNkQdXNJ9ynvGz8Lpnmsctt7Tvp7mrYv6bcwbPG", + "/dns/kusama.bootnodes.polkadotters.com/tcp/30313/wss/p2p/12D3KooWHB5rTeNkQdXNJ9ynvGz8Lpnmsctt7Tvp7mrYv6bcwbPG", "/dns/boot-cr.gatotech.network/tcp/33200/p2p/12D3KooWRNZXf99BfzQDE1C8YhuBbuy7Sj18UEf7FNpD8egbURYD", "/dns/boot-cr.gatotech.network/tcp/35200/wss/p2p/12D3KooWRNZXf99BfzQDE1C8YhuBbuy7Sj18UEf7FNpD8egbURYD", "/dns/boot-kusama.metaspan.io/tcp/23012/p2p/12D3KooWE1tq9ZL9AAxMiUBBqy1ENmh5pwfWabnoBPMo8gFPXhn6", diff --git a/polkadot/node/service/chain-specs/paseo.json b/polkadot/node/service/chain-specs/paseo.json index c8f2b58533c62af5ee3b71ffa5d782c852edd162..8db75ff137b0dbe338859a22ad7361f413c0fbea 100644 --- a/polkadot/node/service/chain-specs/paseo.json +++ b/polkadot/node/service/chain-specs/paseo.json @@ -12,7 +12,9 @@ "/dns/boot-node.helikon.io/tcp/10020/p2p/12D3KooWBetfzZpf6tGihKrqCo5z854Ub4ZNAUUTRT6eYHNh7FYi", "/dns/boot-node.helikon.io/tcp/10022/wss/p2p/12D3KooWBetfzZpf6tGihKrqCo5z854Ub4ZNAUUTRT6eYHNh7FYi", 
"/dns/pso16.rotko.net/tcp/33246/p2p/12D3KooWRH8eBMhw8c7bucy6pJfy94q4dKpLkF3pmeGohHmemdRu", - "/dns/pso16.rotko.net/tcp/35246/wss/p2p/12D3KooWRH8eBMhw8c7bucy6pJfy94q4dKpLkF3pmeGohHmemdRu" + "/dns/pso16.rotko.net/tcp/35246/wss/p2p/12D3KooWRH8eBMhw8c7bucy6pJfy94q4dKpLkF3pmeGohHmemdRu", + "/dns/paseo.bootnodes.polkadotters.com/tcp/30538/p2p/12D3KooWPbbFy4TefEGTRF5eTYhq8LEzc4VAHdNUVCbY4nAnhqPP", + "/dns/paseo.bootnodes.polkadotters.com/tcp/30540/wss/p2p/12D3KooWPbbFy4TefEGTRF5eTYhq8LEzc4VAHdNUVCbY4nAnhqPP" ], "telemetryEndpoints": [ [ diff --git a/polkadot/node/service/chain-specs/polkadot.json b/polkadot/node/service/chain-specs/polkadot.json index 71dbb9004038d1394cebe568627e1e6163ee9049..c235cdd09d8b6eee517909052556a02427495d09 100644 --- a/polkadot/node/service/chain-specs/polkadot.json +++ b/polkadot/node/service/chain-specs/polkadot.json @@ -17,8 +17,8 @@ "/dns/boot-node.helikon.io/tcp/7072/wss/p2p/12D3KooWS9ZcvRxyzrSf6p63QfTCWs12nLoNKhGux865crgxVA4H", "/dns/polkadot.bootnode.amforc.com/tcp/30333/p2p/12D3KooWAsuCEVCzUVUrtib8W82Yne3jgVGhQZN3hizko5FTnDg3", "/dns/polkadot.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWAsuCEVCzUVUrtib8W82Yne3jgVGhQZN3hizko5FTnDg3", - "/dns/polkadot-bootnode.polkadotters.com/tcp/30333/p2p/12D3KooWPAVUgBaBk6n8SztLrMk8ESByncbAfRKUdxY1nygb9zG3", - "/dns/polkadot-bootnode.polkadotters.com/tcp/30334/wss/p2p/12D3KooWPAVUgBaBk6n8SztLrMk8ESByncbAfRKUdxY1nygb9zG3", + "/dns/polkadot.bootnodes.polkadotters.com/tcp/30314/p2p/12D3KooWPAVUgBaBk6n8SztLrMk8ESByncbAfRKUdxY1nygb9zG3", + "/dns/polkadot.bootnodes.polkadotters.com/tcp/30316/wss/p2p/12D3KooWPAVUgBaBk6n8SztLrMk8ESByncbAfRKUdxY1nygb9zG3", "/dns/boot-cr.gatotech.network/tcp/33100/p2p/12D3KooWK4E16jKk9nRhvC4RfrDVgcZzExg8Q3Q2G7ABUUitks1w", "/dns/boot-cr.gatotech.network/tcp/35100/wss/p2p/12D3KooWK4E16jKk9nRhvC4RfrDVgcZzExg8Q3Q2G7ABUUitks1w", "/dns/boot-polkadot.metaspan.io/tcp/13012/p2p/12D3KooWRjHFApinuqSBjoaDjQHvxwubQSpEVy5hrgC9Smvh92WF", diff --git a/polkadot/node/service/chain-specs/westend.json b/polkadot/node/service/chain-specs/westend.json index 697675871fcd7b4b11cac5a25e71c704d471cbde..775f3e72ac75578a0f0166cba96531244af760c9 100644 --- a/polkadot/node/service/chain-specs/westend.json +++ b/polkadot/node/service/chain-specs/westend.json @@ -14,8 +14,8 @@ "/dns/boot-node.helikon.io/tcp/7082/wss/p2p/12D3KooWRFDPyT8vA8mLzh6dJoyujn4QNjeqi6Ch79eSMz9beKXC", "/dns/westend.bootnode.amforc.com/tcp/30333/p2p/12D3KooWJ5y9ZgVepBQNW4aabrxgmnrApdVnscqgKWiUu4BNJbC8", "/dns/westend.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWJ5y9ZgVepBQNW4aabrxgmnrApdVnscqgKWiUu4BNJbC8", - "/dns/westend-bootnode.polkadotters.com/tcp/30333/p2p/12D3KooWHPHb64jXMtSRJDrYFATWeLnvChL8NtWVttY67DCH1eC5", - "/dns/westend-bootnode.polkadotters.com/tcp/30334/wss/p2p/12D3KooWHPHb64jXMtSRJDrYFATWeLnvChL8NtWVttY67DCH1eC5", + "/dns/westend.bootnodes.polkadotters.com/tcp/30308/p2p/12D3KooWHPHb64jXMtSRJDrYFATWeLnvChL8NtWVttY67DCH1eC5", + "/dns/westend.bootnodes.polkadotters.com/tcp/30310/wss/p2p/12D3KooWHPHb64jXMtSRJDrYFATWeLnvChL8NtWVttY67DCH1eC5", "/dns/boot-cr.gatotech.network/tcp/33300/p2p/12D3KooWQGR1vUhoy6mvQorFp3bZFn6NNezhQZ6NWnVV7tpFgoPd", "/dns/boot-cr.gatotech.network/tcp/35300/wss/p2p/12D3KooWQGR1vUhoy6mvQorFp3bZFn6NNezhQZ6NWnVV7tpFgoPd", "/dns/boot-westend.metaspan.io/tcp/33012/p2p/12D3KooWNTau7iG4G9cUJSwwt2QJP1W88pUf2SgqsHjRU2RL8pfa", diff --git a/polkadot/node/service/src/benchmarking.rs b/polkadot/node/service/src/benchmarking.rs index 400daf1aee3448f841f9030f24b78a0029fc0830..29acc5c3ca8c3af835e5ea46e14a31e3361162af 100644 --- 
a/polkadot/node/service/src/benchmarking.rs +++ b/polkadot/node/service/src/benchmarking.rs @@ -189,7 +189,7 @@ fn westend_sign_call( use sp_core::Pair; use westend_runtime as runtime; - let extra: runtime::SignedExtra = ( + let tx_ext: runtime::TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -201,11 +201,12 @@ fn westend_sign_call( frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(0), - ); + ) + .into(); let payload = runtime::SignedPayload::from_raw( call.clone(), - extra.clone(), + tx_ext.clone(), ( (), runtime::VERSION.spec_version, @@ -223,7 +224,7 @@ fn westend_sign_call( call, sp_runtime::AccountId32::from(acc.public()).into(), polkadot_core_primitives::Signature::Sr25519(signature.clone()), - extra, + tx_ext, ) .into() } @@ -241,7 +242,7 @@ fn rococo_sign_call( use rococo_runtime as runtime; use sp_core::Pair; - let extra: runtime::SignedExtra = ( + let tx_ext: runtime::TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -253,11 +254,12 @@ fn rococo_sign_call( frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(0), - ); + ) + .into(); let payload = runtime::SignedPayload::from_raw( call.clone(), - extra.clone(), + tx_ext.clone(), ( (), runtime::VERSION.spec_version, @@ -275,7 +277,7 @@ fn rococo_sign_call( call, sp_runtime::AccountId32::from(acc.public()).into(), polkadot_core_primitives::Signature::Sr25519(signature.clone()), - extra, + tx_ext, ) .into() } diff --git a/polkadot/node/service/src/chain_spec.rs b/polkadot/node/service/src/chain_spec.rs index af241d1cbc558c849bbf533dd644848b20fa4491..1c44b17b6fd24810bf896a64953a1b2f53b490da 100644 --- a/polkadot/node/service/src/chain_spec.rs +++ b/polkadot/node/service/src/chain_spec.rs @@ -24,6 +24,8 @@ use polkadot_primitives::{AccountId, AccountPublic, AssignmentId, ValidatorId}; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_babe::AuthorityId as BabeId; +#[cfg(any(feature = "rococo-native", feature = "westend-native",))] +use polkadot_primitives::vstaging::SchedulerParams; #[cfg(feature = "rococo-native")] use rococo_runtime as rococo; #[cfg(feature = "rococo-native")] @@ -120,7 +122,9 @@ pub fn wococo_config() -> Result { fn default_parachains_host_configuration( ) -> polkadot_runtime_parachains::configuration::HostConfiguration { - use polkadot_primitives::{AsyncBackingParams, MAX_CODE_SIZE, MAX_POV_SIZE}; + use polkadot_primitives::{ + vstaging::node_features::FeatureIndex, AsyncBackingParams, MAX_CODE_SIZE, MAX_POV_SIZE, + }; polkadot_runtime_parachains::configuration::HostConfiguration { validation_upgrade_cooldown: 2u32, @@ -129,8 +133,6 @@ fn default_parachains_host_configuration( max_code_size: MAX_CODE_SIZE, max_pov_size: MAX_POV_SIZE, max_head_data_size: 32 * 1024, - group_rotation_frequency: 20, - paras_availability_period: 4, max_upward_queue_count: 8, max_upward_queue_size: 1024 * 1024, max_downward_message_size: 1024 * 1024, @@ -151,11 +153,19 @@ fn default_parachains_host_configuration( relay_vrf_modulo_samples: 2, zeroth_delay_tranche_width: 0, minimum_validation_upgrade_delay: 5, - scheduling_lookahead: 2, async_backing_params: AsyncBackingParams { max_candidate_depth: 3, allowed_ancestry_len: 2, }, + node_features: 
bitvec::vec::BitVec::from_element( + 1u8 << (FeatureIndex::ElasticScalingMVP as usize), + ), + scheduler_params: SchedulerParams { + lookahead: 2, + group_rotation_frequency: 20, + paras_availability_period: 4, + ..Default::default() + }, ..Default::default() } } @@ -891,7 +901,10 @@ pub fn rococo_testnet_genesis( "sudo": { "key": Some(root_key.clone()) }, "configuration": { "config": polkadot_runtime_parachains::configuration::HostConfiguration { - max_validators_per_core: Some(1), + scheduler_params: SchedulerParams { + max_validators_per_core: Some(1), + ..default_parachains_host_configuration().scheduler_params + }, ..default_parachains_host_configuration() }, }, diff --git a/polkadot/node/service/src/fake_runtime_api.rs b/polkadot/node/service/src/fake_runtime_api.rs index ccc3da22400dfc38f5e94aa4f6e89969499dbee8..085ea93fdc78526f793b726bf385124ec6418e5d 100644 --- a/polkadot/node/service/src/fake_runtime_api.rs +++ b/polkadot/node/service/src/fake_runtime_api.rs @@ -60,7 +60,7 @@ sp_api::impl_runtime_apis! { unimplemented!() } - fn initialize_block(_: &::Header) { + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 6dcdec07ca84bd0fe9456e2bc29cbdf6cee6a9f8..83a0afc077e7edfb4a60fb8ed4109157f020695b 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -92,6 +92,7 @@ pub use chain_spec::{GenericChainSpec, RococoChainSpec, WestendChainSpec}; pub use consensus_common::{Proposal, SelectChain}; use frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE; use mmr_gadget::MmrGadget; +use polkadot_node_subsystem_types::DefaultSubsystemClient; pub use polkadot_primitives::{Block, BlockId, BlockNumber, CollatorPair, Hash, Id as ParaId}; pub use sc_client_api::{Backend, CallExecutor}; pub use sc_consensus::{BlockImport, LongestChain}; @@ -1081,12 +1082,17 @@ pub fn new_full( None }; + let runtime_client = Arc::new(DefaultSubsystemClient::new( + overseer_client.clone(), + OffchainTransactionPoolFactory::new(transaction_pool.clone()), + )); + let overseer_handle = if let Some(authority_discovery_service) = authority_discovery_service { let (overseer, overseer_handle) = overseer_gen - .generate::( + .generate::>( overseer_connector, OverseerGenArgs { - runtime_client: overseer_client.clone(), + runtime_client, network_service: network.clone(), sync_service: sync_service.clone(), authority_discovery_service, @@ -1099,9 +1105,6 @@ pub fn new_full( overseer_message_channel_capacity_override, req_protocol_names, peerset_protocol_names, - offchain_transaction_pool_factory: OffchainTransactionPoolFactory::new( - transaction_pool.clone(), - ), notification_services, }, ext_overseer_args, diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs index b2b0786bfc24e5f702ddd82aec0676f0f9f795a8..a8167370464937ac91ae44056ea0b17b7184927a 100644 --- a/polkadot/node/service/src/overseer.rs +++ b/polkadot/node/service/src/overseer.rs @@ -14,10 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
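Editor's note on the `HostConfiguration` change in `chain_spec.rs` above: the scheduling knobs (`group_rotation_frequency`, `paras_availability_period`, the former `scheduling_lookahead`, and `max_validators_per_core`) now live in a nested `SchedulerParams` struct instead of sitting directly on `HostConfiguration`. A minimal sketch of the new shape, assuming the `polkadot_runtime_parachains` and `polkadot_primitives` crates from this workspace; the values mirror the defaults used in this diff and are illustrative only:

```rust
use polkadot_primitives::{vstaging::SchedulerParams, BlockNumber};
use polkadot_runtime_parachains::configuration::HostConfiguration;

// Sketch only: shows where the scheduling fields moved, not a full configuration.
fn example_host_configuration() -> HostConfiguration<BlockNumber> {
    HostConfiguration {
        // Scheduling-related fields are now grouped in the nested struct.
        scheduler_params: SchedulerParams {
            lookahead: 2, // was `scheduling_lookahead` on `HostConfiguration`
            group_rotation_frequency: 20,
            paras_availability_period: 4,
            ..Default::default()
        },
        ..Default::default()
    }
}
```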
-use super::{AuthorityDiscoveryApi, Block, Error, Hash, IsParachainNode, Registry}; -use polkadot_node_subsystem_types::DefaultSubsystemClient; +use super::{Block, Error, Hash, IsParachainNode, Registry}; +use polkadot_node_subsystem_types::{ChainApiBackend, RuntimeApiSubsystemClient}; use polkadot_overseer::{DummySubsystem, InitializedOverseerBuilder, SubsystemError}; -use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_core::traits::SpawnNamed; use polkadot_availability_distribution::IncomingRequestReceivers; @@ -40,14 +39,10 @@ use polkadot_overseer::{ }; use parking_lot::Mutex; -use polkadot_primitives::runtime_api::ParachainHost; use sc_authority_discovery::Service as AuthorityDiscoveryService; use sc_client_api::AuxStore; use sc_keystore::LocalKeystore; use sc_network::{NetworkStateInfo, NotificationService}; -use sp_api::ProvideRuntimeApi; -use sp_blockchain::HeaderBackend; -use sp_consensus_babe::BabeApi; use std::{collections::HashMap, sync::Arc}; pub use polkadot_approval_distribution::ApprovalDistribution as ApprovalDistributionSubsystem; @@ -80,8 +75,6 @@ pub use polkadot_statement_distribution::StatementDistributionSubsystem; /// Arguments passed for overseer construction. pub struct OverseerGenArgs<'a, Spawner, RuntimeClient> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, Spawner: 'static + SpawnNamed + Clone + Unpin, { /// Runtime client generic, providing the `ProvieRuntimeApi` trait besides others. @@ -89,7 +82,7 @@ where /// Underlying network service implementation. pub network_service: Arc>, /// Underlying syncing service implementation. - pub sync_service: Arc>, + pub sync_service: Arc, /// Underlying authority discovery service. pub authority_discovery_service: AuthorityDiscoveryService, /// Collations request receiver for network protocol v1. @@ -111,8 +104,6 @@ where pub req_protocol_names: ReqProtocolNames, /// `PeerSet` protocol names to protocols mapping. pub peerset_protocol_names: PeerSetProtocolNames, - /// The offchain transaction pool factory. - pub offchain_transaction_pool_factory: OffchainTransactionPoolFactory, /// Notification services for validation/collation protocols. 
pub notification_services: HashMap>, } @@ -160,7 +151,6 @@ pub fn validator_overseer_builder( overseer_message_channel_capacity_override, req_protocol_names, peerset_protocol_names, - offchain_transaction_pool_factory, notification_services, }: OverseerGenArgs, ExtendedOverseerGenArgs { @@ -180,7 +170,7 @@ pub fn validator_overseer_builder( ) -> Result< InitializedOverseerBuilder< SpawnGlue, - Arc>, + Arc, CandidateValidationSubsystem, PvfCheckerSubsystem, CandidateBackingSubsystem, @@ -190,7 +180,7 @@ pub fn validator_overseer_builder( BitfieldSigningSubsystem, BitfieldDistributionSubsystem, ProvisionerSubsystem, - RuntimeApiSubsystem>, + RuntimeApiSubsystem, AvailabilityStoreSubsystem, NetworkBridgeRxSubsystem< Arc>, @@ -214,8 +204,7 @@ pub fn validator_overseer_builder( Error, > where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { use polkadot_node_subsystem_util::metrics::Metrics; @@ -227,11 +216,6 @@ where let network_bridge_metrics: NetworkBridgeMetrics = Metrics::register(registry)?; - let runtime_api_client = Arc::new(DefaultSubsystemClient::new( - runtime_client.clone(), - offchain_transaction_pool_factory, - )); - let builder = Overseer::builder() .network_bridge_tx(NetworkBridgeTxSubsystem::new( network_service.clone(), @@ -298,7 +282,7 @@ where }) .provisioner(ProvisionerSubsystem::new(Metrics::register(registry)?)) .runtime_api(RuntimeApiSubsystem::new( - runtime_api_client.clone(), + runtime_client.clone(), Metrics::register(registry)?, spawner.clone(), )) @@ -339,7 +323,7 @@ where .activation_external_listeners(Default::default()) .span_per_active_leaf(Default::default()) .active_leaves(Default::default()) - .supports_parachains(runtime_api_client) + .supports_parachains(runtime_client) .metrics(metrics) .spawner(spawner); @@ -367,13 +351,12 @@ pub fn collator_overseer_builder( overseer_message_channel_capacity_override, req_protocol_names, peerset_protocol_names, - offchain_transaction_pool_factory, notification_services, }: OverseerGenArgs, ) -> Result< InitializedOverseerBuilder< SpawnGlue, - Arc>, + Arc, DummySubsystem, DummySubsystem, DummySubsystem, @@ -383,7 +366,7 @@ pub fn collator_overseer_builder( DummySubsystem, DummySubsystem, DummySubsystem, - RuntimeApiSubsystem>, + RuntimeApiSubsystem, DummySubsystem, NetworkBridgeRxSubsystem< Arc>, @@ -407,24 +390,17 @@ pub fn collator_overseer_builder( Error, > where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, Spawner: 'static + SpawnNamed + Clone + Unpin, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, { use polkadot_node_subsystem_util::metrics::Metrics; - let metrics = ::register(registry)?; let notification_sinks = Arc::new(Mutex::new(HashMap::new())); let spawner = SpawnGlue(spawner); let network_bridge_metrics: NetworkBridgeMetrics = Metrics::register(registry)?; - let runtime_api_client = Arc::new(DefaultSubsystemClient::new( - runtime_client.clone(), - offchain_transaction_pool_factory, - )); - let builder = Overseer::builder() .network_bridge_tx(NetworkBridgeTxSubsystem::new( network_service.clone(), @@ -475,7 +451,7 @@ where }) .provisioner(DummySubsystem) .runtime_api(RuntimeApiSubsystem::new( - runtime_api_client.clone(), + runtime_client.clone(), 
Metrics::register(registry)?, spawner.clone(), )) @@ -490,8 +466,8 @@ where .activation_external_listeners(Default::default()) .span_per_active_leaf(Default::default()) .active_leaves(Default::default()) - .supports_parachains(runtime_api_client) - .metrics(metrics) + .supports_parachains(runtime_client) + .metrics(Metrics::register(registry)?) .spawner(spawner); let builder = if let Some(capacity) = overseer_message_channel_capacity_override { @@ -510,13 +486,9 @@ pub trait OverseerGen { connector: OverseerConnector, args: OverseerGenArgs, ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin; // It would be nice to make `create_subsystems` part of this trait, @@ -533,13 +505,9 @@ impl OverseerGen for ValidatorOverseerGen { connector: OverseerConnector, args: OverseerGenArgs, ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { let ext_args = ext_args.ok_or(Error::Overseer(SubsystemError::Context( @@ -561,13 +529,9 @@ impl OverseerGen for CollatorOverseerGen { connector: OverseerConnector, args: OverseerGenArgs, _ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { collator_overseer_builder(args)? diff --git a/polkadot/node/service/src/parachains_db/upgrade.rs b/polkadot/node/service/src/parachains_db/upgrade.rs index d22eebb5c8d4edebdd2174f3cb1ba144fea0b130..2eceb391b15c278825b243fdb8dc21e8ef540390 100644 --- a/polkadot/node/service/src/parachains_db/upgrade.rs +++ b/polkadot/node/service/src/parachains_db/upgrade.rs @@ -93,7 +93,7 @@ pub(crate) fn try_upgrade_db( } /// Try upgrading parachain's database to the next version. -/// If successfull, it returns the current version. +/// If successful, it returns the current version. pub(crate) fn try_upgrade_db_to_next_version( db_path: &Path, db_kind: DatabaseKind, diff --git a/polkadot/node/subsystem-bench/Cargo.toml b/polkadot/node/subsystem-bench/Cargo.toml index 7de91d8cd5ded29a6ad2f2f033cd0dfc5ca580d5..726e7de4587c1c99c102d8b03b0c5fece058de0d 100644 --- a/polkadot/node/subsystem-bench/Cargo.toml +++ b/polkadot/node/subsystem-bench/Cargo.toml @@ -8,9 +8,13 @@ license.workspace = true readme = "README.md" publish = false +[lib] +name = "polkadot_subsystem_bench" +path = "src/lib/lib.rs" + [[bin]] name = "subsystem-bench" -path = "src/subsystem-bench.rs" +path = "src/cli/subsystem-bench.rs" # Prevent rustdoc error. Already documented from top-level Cargo.toml. 
doc = false diff --git a/polkadot/node/subsystem-bench/README.md b/polkadot/node/subsystem-bench/README.md index e090a0392cb733ffc2a4b21edc73da44e5aed708..3aac2810ad58f8fe732155aa5d8be611c44426c2 100644 --- a/polkadot/node/subsystem-bench/README.md +++ b/polkadot/node/subsystem-bench/README.md @@ -1,6 +1,6 @@ # Subsystem benchmark client -Run parachain consensus stress and performance tests on your development machine. +Run parachain consensus stress and performance tests on your development machine or in CI. ## Motivation @@ -32,7 +32,8 @@ a local Grafana/Prometheus stack is needed. ### Run Prometheus, Pyroscope and Graphana in Docker -If docker is not usable, then follow the next sections to manually install Prometheus, Pyroscope and Graphana on your machine. +If docker is not usable, then follow the next sections to manually install Prometheus, Pyroscope and Graphana +on your machine. ```bash cd polkadot/node/subsystem-bench/docker @@ -95,39 +96,16 @@ If you are running the servers in Docker, use the following URLs: Follow [this guide](https://grafana.com/docs/grafana/latest/dashboards/manage-dashboards/#export-and-import-dashboards) to import the dashboards from the repository `grafana` folder. -## How to run a test - -To run a test, you need to first choose a test objective. Currently, we support the following: - -``` -target/testnet/subsystem-bench --help -The almighty Subsystem Benchmark Tool™️ - -Usage: subsystem-bench [OPTIONS] - -Commands: - data-availability-read Benchmark availability recovery strategies +### Standard test options ``` +$ subsystem-bench --help +Usage: subsystem-bench [OPTIONS] -Note: `test-sequence` is a special test objective that wraps up an arbitrary number of test objectives. It is tipically - used to run a suite of tests defined in a `yaml` file like in this [example](examples/availability_read.yaml). +Arguments: + Path to the test sequence configuration file -### Standard test options - -``` - --network The type of network to be emulated [default: ideal] [possible values: ideal, healthy, - degraded] - --n-cores Number of cores to fetch availability for [default: 100] - --n-validators Number of validators to fetch chunks from [default: 500] - --min-pov-size The minimum pov size in KiB [default: 5120] - --max-pov-size The maximum pov size bytes [default: 5120] - -n, --num-blocks The number of blocks the test is going to run [default: 1] - -p, --peer-bandwidth The bandwidth of emulated remote peers in KiB - -b, --bandwidth The bandwidth of our node in KiB - --connectivity Emulated peer connection ratio [0-100] - --peer-mean-latency Mean remote peer latency in milliseconds [0-5000] - --peer-latency-std-dev Remote peer latency standard deviation +Options: --profile Enable CPU Profiling with Pyroscope --pyroscope-url Pyroscope Server URL [default: http://localhost:4040] --pyroscope-sample-rate Pyroscope Sample Rate [default: 113] @@ -135,27 +113,17 @@ Note: `test-sequence` is a special test objective that wraps up an arbitrary num -h, --help Print help ``` -These apply to all test objectives, except `test-sequence` which relies on the values being specified in a file. - -### Test objectives - -Each test objective can have it's specific configuration options, in contrast with the standard test options. +## How to run a test -For `data-availability-read` the recovery strategy to be used is configurable. 
+To run a test, you need to use a path to a test objective: ``` -target/testnet/subsystem-bench data-availability-read --help -Benchmark availability recovery strategies - -Usage: subsystem-bench data-availability-read [OPTIONS] - -Options: - -f, --fetch-from-backers Turbo boost AD Read by fetching the full availability datafrom backers first. Saves CPU - as we don't need to re-construct from chunks. Tipically this is only faster if nodes - have enough bandwidth - -h, --help Print help +target/testnet/subsystem-bench polkadot/node/subsystem-bench/examples/availability_read.yaml ``` +Note: test objectives may be wrapped up into a test sequence. +It is tipically used to run a suite of tests like in this [example](examples/availability_read.yaml). + ### Understanding the test configuration A single test configuration `TestConfiguration` struct applies to a single run of a certain test objective. @@ -175,36 +143,65 @@ the test is started. ### Example run -Let's run an availabilty read test which will recover availability for 10 cores with max PoV size on a 500 +Let's run an availabilty read test which will recover availability for 200 cores with max PoV size on a 1000 node validator network. + + ``` - target/testnet/subsystem-bench --n-cores 10 data-availability-read -[2023-11-28T09:01:59Z INFO subsystem_bench::core::display] n_validators = 500, n_cores = 10, pov_size = 5120 - 5120, - latency = None -[2023-11-28T09:01:59Z INFO subsystem-bench::availability] Generating template candidate index=0 pov_size=5242880 -[2023-11-28T09:01:59Z INFO subsystem-bench::availability] Created test environment. -[2023-11-28T09:01:59Z INFO subsystem-bench::availability] Pre-generating 10 candidates. -[2023-11-28T09:02:01Z INFO subsystem-bench::core] Initializing network emulation for 500 peers. -[2023-11-28T09:02:01Z INFO substrate_prometheus_endpoint] 〽️ Prometheus exporter started at 127.0.0.1:9999 -[2023-11-28T09:02:01Z INFO subsystem-bench::availability] Current block 1/1 -[2023-11-28T09:02:01Z INFO subsystem_bench::availability] 10 recoveries pending -[2023-11-28T09:02:04Z INFO subsystem_bench::availability] Block time 3231ms -[2023-11-28T09:02:04Z INFO subsystem-bench::availability] Sleeping till end of block (2768ms) -[2023-11-28T09:02:07Z INFO subsystem_bench::availability] All blocks processed in 6001ms -[2023-11-28T09:02:07Z INFO subsystem_bench::availability] Throughput: 51200 KiB/block -[2023-11-28T09:02:07Z INFO subsystem_bench::availability] Block time: 6001 ms -[2023-11-28T09:02:07Z INFO subsystem_bench::availability] - - Total received from network: 66 MiB - Total sent to network: 58 KiB - Total subsystem CPU usage 4.16s - CPU usage per block 4.16s - Total test environment CPU usage 0.00s - CPU usage per block 0.00s +target/testnet/subsystem-bench polkadot/node/subsystem-bench/examples/availability_write.yaml +[2024-02-19T14:10:32.981Z INFO subsystem_bench] Sequence contains 1 step(s) +[2024-02-19T14:10:32.981Z INFO subsystem-bench::cli] Step 1/1 +[2024-02-19T14:10:32.981Z INFO subsystem-bench::cli] [objective = DataAvailabilityWrite] n_validators = 1000, n_cores = 200, pov_size = 5120 - 5120, connectivity = 75, latency = Some(PeerLatency { mean_latency_ms: 30, std_dev: 2.0 }) +[2024-02-19T14:10:32.982Z INFO subsystem-bench::availability] Generating template candidate index=0 pov_size=5242880 +[2024-02-19T14:10:33.106Z INFO subsystem-bench::availability] Created test environment. +[2024-02-19T14:10:33.106Z INFO subsystem-bench::availability] Pre-generating 600 candidates. 
+[2024-02-19T14:10:34.096Z INFO subsystem-bench::network] Initializing emulation for a 1000 peer network. +[2024-02-19T14:10:34.096Z INFO subsystem-bench::network] connectivity 75%, latency Some(PeerLatency { mean_latency_ms: 30, std_dev: 2.0 }) +[2024-02-19T14:10:34.098Z INFO subsystem-bench::network] Network created, connected validator count 749 +[2024-02-19T14:10:34.099Z INFO subsystem-bench::availability] Seeding availability store with candidates ... +[2024-02-19T14:10:34.100Z INFO substrate_prometheus_endpoint] 〽️ Prometheus exporter started at 127.0.0.1:9999 +[2024-02-19T14:10:34.387Z INFO subsystem-bench::availability] Done +[2024-02-19T14:10:34.387Z INFO subsystem-bench::availability] Current block #1 +[2024-02-19T14:10:34.389Z INFO subsystem-bench::availability] Waiting for all emulated peers to receive their chunk from us ... +[2024-02-19T14:10:34.625Z INFO subsystem-bench::availability] All chunks received in 237ms +[2024-02-19T14:10:34.626Z INFO polkadot_subsystem_bench::availability] Waiting for 749 bitfields to be received and processed +[2024-02-19T14:10:35.710Z INFO subsystem-bench::availability] All bitfields processed +[2024-02-19T14:10:35.710Z INFO subsystem-bench::availability] All work for block completed in 1322ms +[2024-02-19T14:10:35.710Z INFO subsystem-bench::availability] Current block #2 +[2024-02-19T14:10:35.712Z INFO subsystem-bench::availability] Waiting for all emulated peers to receive their chunk from us ... +[2024-02-19T14:10:35.947Z INFO subsystem-bench::availability] All chunks received in 236ms +[2024-02-19T14:10:35.947Z INFO polkadot_subsystem_bench::availability] Waiting for 749 bitfields to be received and processed +[2024-02-19T14:10:37.038Z INFO subsystem-bench::availability] All bitfields processed +[2024-02-19T14:10:37.038Z INFO subsystem-bench::availability] All work for block completed in 1328ms +[2024-02-19T14:10:37.039Z INFO subsystem-bench::availability] Current block #3 +[2024-02-19T14:10:37.040Z INFO subsystem-bench::availability] Waiting for all emulated peers to receive their chunk from us ... +[2024-02-19T14:10:37.276Z INFO subsystem-bench::availability] All chunks received in 237ms +[2024-02-19T14:10:37.276Z INFO polkadot_subsystem_bench::availability] Waiting for 749 bitfields to be received and processed +[2024-02-19T14:10:38.362Z INFO subsystem-bench::availability] All bitfields processed +[2024-02-19T14:10:38.362Z INFO subsystem-bench::availability] All work for block completed in 1323ms +[2024-02-19T14:10:38.362Z INFO subsystem-bench::availability] All blocks processed in 3974ms +[2024-02-19T14:10:38.362Z INFO subsystem-bench::availability] Avg block time: 1324 ms +[2024-02-19T14:10:38.362Z INFO parachain::availability-store] received `Conclude` signal, exiting +[2024-02-19T14:10:38.362Z INFO parachain::bitfield-distribution] Conclude +[2024-02-19T14:10:38.362Z INFO subsystem-bench::network] Downlink channel closed, network interface task exiting + +polkadot/node/subsystem-bench/examples/availability_write.yaml #1 DataAvailabilityWrite + +Network usage, KiB total per block +Received from peers 12922.000 4307.333 +Sent to peers 47705.000 15901.667 + +CPU usage, seconds total per block +availability-distribution 0.045 0.015 +bitfield-distribution 0.104 0.035 +availability-store 0.304 0.101 +Test environment 3.213 1.071 ``` -`Block time` in the context of `data-availability-read` has a different meaning. It measures the amount of time it + + +`Block time` in the current context has a different meaning. 
It measures the amount of time it took the subsystem to finish processing all of the messages sent in the context of the current test block. ### Test logs @@ -233,8 +230,9 @@ Since the execution will be very slow, it's recommended not to run it together w benchmark results into account. A report is saved in a file `cachegrind_report.txt`. Example run results: + ``` -$ target/testnet/subsystem-bench --n-cores 10 --cache-misses data-availability-read +$ target/testnet/subsystem-bench --cache-misses cache-misses-data-availability-read.yaml $ cat cachegrind_report.txt I refs: 64,622,081,485 I1 misses: 3,018,168 @@ -275,7 +273,7 @@ happy and negative scenarios (low bandwidth, network errors and low connectivity To faster write a new test objective you need to use some higher level wrappers and logic: `TestEnvironment`, `TestConfiguration`, `TestAuthorities`, `NetworkEmulator`. To create the `TestEnvironment` you will -need to also build an `Overseer`, but that should be easy using the mockups for subsystems in`core::mock`. +need to also build an `Overseer`, but that should be easy using the mockups for subsystems in `mock`. ### Mocking diff --git a/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml b/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml index 758c7fbbf1121a249ff3094bc74967e3ad2420da..146da57d44c4aaf973e13c886a357028cdbe3559 100644 --- a/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml +++ b/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml @@ -1,14 +1,14 @@ TestConfiguration: # Test 1 - objective: !ApprovalVoting - last_considered_tranche: 89 coalesce_mean: 3.0 coalesce_std_dev: 1.0 + enable_assignments_v2: true + last_considered_tranche: 89 stop_when_approved: true coalesce_tranche_diff: 12 - workdir_prefix: "/tmp/" - enable_assignments_v2: true num_no_shows_per_candidate: 10 + workdir_prefix: "/tmp/" n_validators: 500 n_cores: 100 min_pov_size: 1120 diff --git a/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml b/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml index 9eeeefc53a4277aae51e44ee5940a2eb65111cd3..6b17e62c20aa3f69153fb596d1a303a2e0320ddd 100644 --- a/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml +++ b/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml @@ -7,11 +7,10 @@ TestConfiguration: last_considered_tranche: 89 stop_when_approved: false coalesce_tranche_diff: 12 - workdir_prefix: "/tmp" num_no_shows_per_candidate: 0 + workdir_prefix: "/tmp" n_validators: 500 n_cores: 100 - n_included_candidates: 100 min_pov_size: 1120 max_pov_size: 5120 peer_bandwidth: 524288000000 diff --git a/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml b/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml index 370bb31a5c4c102174c29382ab6de70ca336278e..e946c28e8ef5d4e38736ffc21e56d8b1c6cd0ddc 100644 --- a/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml +++ b/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml @@ -7,8 +7,8 @@ TestConfiguration: last_considered_tranche: 89 stop_when_approved: true coalesce_tranche_diff: 12 - workdir_prefix: "/tmp/" num_no_shows_per_candidate: 0 + workdir_prefix: "/tmp/" n_validators: 500 n_cores: 100 min_pov_size: 1120 diff --git a/polkadot/node/subsystem-bench/examples/approvals_throughput_no_optimisations_enabled.yaml b/polkadot/node/subsystem-bench/examples/approvals_throughput_no_optimisations_enabled.yaml index 
30b9ac8dc50fec25bc4952ef3706f9878aa2bbd7..8f4b050e72f27dd4b5bb0c52bd49162cd0bb83ec 100644 --- a/polkadot/node/subsystem-bench/examples/approvals_throughput_no_optimisations_enabled.yaml +++ b/polkadot/node/subsystem-bench/examples/approvals_throughput_no_optimisations_enabled.yaml @@ -7,8 +7,8 @@ TestConfiguration: last_considered_tranche: 89 stop_when_approved: false coalesce_tranche_diff: 12 - workdir_prefix: "/tmp/" num_no_shows_per_candidate: 0 + workdir_prefix: "/tmp/" n_validators: 500 n_cores: 100 min_pov_size: 1120 diff --git a/polkadot/node/subsystem-bench/src/availability/cli.rs b/polkadot/node/subsystem-bench/src/availability/cli.rs deleted file mode 100644 index 65df8c1552aa8266497eb2738cc562f656050b68..0000000000000000000000000000000000000000 --- a/polkadot/node/subsystem-bench/src/availability/cli.rs +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -use serde::{Deserialize, Serialize}; - -#[derive(clap::ValueEnum, Clone, Copy, Debug, PartialEq)] -#[value(rename_all = "kebab-case")] -#[non_exhaustive] -pub enum NetworkEmulation { - Ideal, - Healthy, - Degraded, -} - -#[derive(Debug, Clone, Serialize, Deserialize, clap::Parser)] -#[clap(rename_all = "kebab-case")] -#[allow(missing_docs)] -pub struct DataAvailabilityReadOptions { - #[clap(short, long, default_value_t = false)] - /// Turbo boost AD Read by fetching the full availability datafrom backers first. Saves CPU as - /// we don't need to re-construct from chunks. Tipically this is only faster if nodes have - /// enough bandwidth. - pub fetch_from_backers: bool, -} diff --git a/polkadot/node/subsystem-bench/src/subsystem-bench.rs b/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs similarity index 62% rename from polkadot/node/subsystem-bench/src/subsystem-bench.rs rename to polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs index 0803f175474e69a76640b12497acaaafa4cd5213..deb351360d74ede1ea78494a34f7c459dc544cdd 100644 --- a/polkadot/node/subsystem-bench/src/subsystem-bench.rs +++ b/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs @@ -14,54 +14,32 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! A tool for running subsystem benchmark tests designed for development and -//! CI regression testing. - -use approval::{bench_approvals, ApprovalsOptions}; -use availability::{ - cli::{DataAvailabilityReadOptions, NetworkEmulation}, - prepare_test, TestState, -}; +//! A tool for running subsystem benchmark tests +//! designed for development and CI regression testing. 
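Since the CLI rework in this PR replaces the per-objective command-line flags with a YAML test sequence file, a sketch of such a file may help. The structure and field names below follow the `TestConfiguration` struct and the `examples/*.yaml` files touched in this diff; the concrete values are illustrative only:

```yaml
# Hypothetical test sequence; structure follows examples/availability_read.yaml.
TestConfiguration:
# Test 1
- objective: !DataAvailabilityRead
    fetch_from_backers: false
  n_validators: 500
  n_cores: 100
  min_pov_size: 1120
  max_pov_size: 5120
  peer_bandwidth: 524288000000
  bandwidth: 524288000000
  connectivity: 100
  num_blocks: 3
```

It is then passed to the binary as the single positional argument, e.g. `target/testnet/subsystem-bench my_sequence.yaml`.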
+ use clap::Parser; -use clap_num::number_range; use color_eyre::eyre; use colored::Colorize; -use core::{ - configuration::TestConfiguration, - display::display_configuration, - environment::{TestEnvironment, GENESIS_HASH}, -}; +use polkadot_subsystem_bench::{approval, availability, configuration}; use pyroscope::PyroscopeAgent; use pyroscope_pprofrs::{pprof_backend, PprofConfig}; use serde::{Deserialize, Serialize}; use std::path::Path; -mod approval; -mod availability; -mod core; mod valgrind; -const LOG_TARGET: &str = "subsystem-bench"; - -fn le_100(s: &str) -> Result { - number_range(s, 0, 100) -} - -fn le_5000(s: &str) -> Result { - number_range(s, 0, 5000) -} +const LOG_TARGET: &str = "subsystem-bench::cli"; /// Supported test objectives #[derive(Debug, Clone, Parser, Serialize, Deserialize)] #[command(rename_all = "kebab-case")] pub enum TestObjective { /// Benchmark availability recovery strategies. - DataAvailabilityRead(DataAvailabilityReadOptions), + DataAvailabilityRead(availability::DataAvailabilityReadOptions), /// Benchmark availability and bitfield distribution. DataAvailabilityWrite, /// Benchmark the approval-voting and approval-distribution subsystems. - ApprovalVoting(ApprovalsOptions), - Unimplemented, + ApprovalVoting(approval::ApprovalsOptions), } impl std::fmt::Display for TestObjective { @@ -73,39 +51,37 @@ impl std::fmt::Display for TestObjective { Self::DataAvailabilityRead(_) => "DataAvailabilityRead", Self::DataAvailabilityWrite => "DataAvailabilityWrite", Self::ApprovalVoting(_) => "ApprovalVoting", - Self::Unimplemented => "Unimplemented", } ) } } -#[derive(Debug, Parser)] -#[allow(missing_docs)] -struct BenchCli { - #[arg(long, value_enum, ignore_case = true, default_value_t = NetworkEmulation::Ideal)] - /// The type of network to be emulated - pub network: NetworkEmulation, - - #[clap(short, long)] - /// The bandwidth of emulated remote peers in KiB - pub peer_bandwidth: Option, - - #[clap(short, long)] - /// The bandwidth of our node in KiB - pub bandwidth: Option, - - #[clap(long, value_parser=le_100)] - /// Emulated peer connection ratio [0-100]. - pub connectivity: Option, +/// The test input parameters +#[derive(Clone, Debug, Serialize, Deserialize)] +struct CliTestConfiguration { + /// Test Objective + pub objective: TestObjective, + /// Test Configuration + #[serde(flatten)] + pub test_config: configuration::TestConfiguration, +} - #[clap(long, value_parser=le_5000)] - /// Mean remote peer latency in milliseconds [0-5000]. - pub peer_mean_latency: Option, +#[derive(Serialize, Deserialize)] +pub struct TestSequence { + #[serde(rename(serialize = "TestConfiguration", deserialize = "TestConfiguration"))] + test_configurations: Vec, +} - #[clap(long, value_parser=le_5000)] - /// Remote peer latency standard deviation - pub peer_latency_std_dev: Option, +impl TestSequence { + fn new_from_file(path: &Path) -> std::io::Result { + let string = String::from_utf8(std::fs::read(path)?).expect("File is valid UTF8"); + Ok(serde_yaml::from_str(&string).expect("File is valid test sequence YA")) + } +} +#[derive(Debug, Parser)] +#[allow(missing_docs)] +struct BenchCli { #[clap(long, default_value_t = false)] /// Enable CPU Profiling with Pyroscope pub profile: bool, @@ -122,10 +98,6 @@ struct BenchCli { /// Enable Cache Misses Profiling with Valgrind. 
Linux only, Valgrind must be in the PATH pub cache_misses: bool, - #[clap(long, default_value_t = false)] - /// Shows the output in YAML format - pub yaml_output: bool, - #[arg(required = true)] /// Path to the test sequence configuration file pub path: String, @@ -148,49 +120,60 @@ impl BenchCli { None }; - let test_sequence = core::configuration::TestSequence::new_from_file(Path::new(&self.path)) + let test_sequence = TestSequence::new_from_file(Path::new(&self.path)) .expect("File exists") - .into_vec(); + .test_configurations; let num_steps = test_sequence.len(); gum::info!("{}", format!("Sequence contains {} step(s)", num_steps).bright_purple()); - for (index, test_config) in test_sequence.into_iter().enumerate() { - let benchmark_name = format!("{} #{} {}", &self.path, index + 1, test_config.objective); - gum::info!(target: LOG_TARGET, "{}", format!("Step {}/{}", index + 1, num_steps).bright_purple(),); - display_configuration(&test_config); - let usage = match test_config.objective { - TestObjective::DataAvailabilityRead(ref _opts) => { - let mut state = TestState::new(&test_config); - let (mut env, _protocol_config) = prepare_test(test_config, &mut state); + for (index, CliTestConfiguration { objective, mut test_config }) in + test_sequence.into_iter().enumerate() + { + let benchmark_name = format!("{} #{} {}", &self.path, index + 1, objective); + gum::info!(target: LOG_TARGET, "{}", format!("Step {}/{}", index + 1, num_steps).bright_purple(),); + gum::info!(target: LOG_TARGET, "[{}] {}", format!("objective = {:?}", objective).green(), test_config); + test_config.generate_pov_sizes(); + + let usage = match objective { + TestObjective::DataAvailabilityRead(opts) => { + let mut state = availability::TestState::new(&test_config); + let (mut env, _protocol_config) = availability::prepare_test( + test_config, + &mut state, + availability::TestDataAvailability::Read(opts), + true, + ); env.runtime().block_on(availability::benchmark_availability_read( &benchmark_name, &mut env, state, )) }, - TestObjective::ApprovalVoting(ref options) => { - let (mut env, state) = - approval::prepare_test(test_config.clone(), options.clone()); - env.runtime().block_on(bench_approvals(&benchmark_name, &mut env, state)) - }, TestObjective::DataAvailabilityWrite => { - let mut state = TestState::new(&test_config); - let (mut env, _protocol_config) = prepare_test(test_config, &mut state); + let mut state = availability::TestState::new(&test_config); + let (mut env, _protocol_config) = availability::prepare_test( + test_config, + &mut state, + availability::TestDataAvailability::Write, + true, + ); env.runtime().block_on(availability::benchmark_availability_write( &benchmark_name, &mut env, state, )) }, - TestObjective::Unimplemented => todo!(), - }; - - let output = if self.yaml_output { - serde_yaml::to_string(&vec![usage])? 
- } else { - usage.to_string() + TestObjective::ApprovalVoting(ref options) => { + let (mut env, state) = + approval::prepare_test(test_config.clone(), options.clone(), true); + env.runtime().block_on(approval::bench_approvals( + &benchmark_name, + &mut env, + state, + )) + }, }; - println!("{}", output); + println!("{}", usage); } if let Some(agent_running) = agent_running { diff --git a/polkadot/node/subsystem-bench/src/valgrind.rs b/polkadot/node/subsystem-bench/src/cli/valgrind.rs similarity index 100% rename from polkadot/node/subsystem-bench/src/valgrind.rs rename to polkadot/node/subsystem-bench/src/cli/valgrind.rs diff --git a/polkadot/node/subsystem-bench/src/approval/helpers.rs b/polkadot/node/subsystem-bench/src/lib/approval/helpers.rs similarity index 99% rename from polkadot/node/subsystem-bench/src/approval/helpers.rs rename to polkadot/node/subsystem-bench/src/lib/approval/helpers.rs index 623d91848f53f26e69ad7595c52a0e61466af6d1..af5ff5aa1facc11f2d0caa8ec77276976a0554a5 100644 --- a/polkadot/node/subsystem-bench/src/approval/helpers.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/helpers.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use crate::core::configuration::TestAuthorities; +use crate::configuration::TestAuthorities; use itertools::Itertools; use polkadot_node_core_approval_voting::time::{Clock, SystemClock, Tick}; use polkadot_node_network_protocol::{ diff --git a/polkadot/node/subsystem-bench/src/approval/message_generator.rs b/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs similarity index 97% rename from polkadot/node/subsystem-bench/src/approval/message_generator.rs rename to polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs index a71034013247678aeb9aced2619f8d1d6d29ebe9..c1b31a509f6dd21c3f23bb44afec8859e51fa76a 100644 --- a/polkadot/node/subsystem-bench/src/approval/message_generator.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs @@ -18,15 +18,12 @@ use crate::{ approval::{ helpers::{generate_babe_epoch, generate_topology}, test_message::{MessagesBundle, TestMessageInfo}, - ApprovalTestState, BlockTestData, GeneratedState, BUFFER_FOR_GENERATION_MILLIS, LOG_TARGET, - SLOT_DURATION_MILLIS, + ApprovalTestState, ApprovalsOptions, BlockTestData, GeneratedState, + BUFFER_FOR_GENERATION_MILLIS, LOG_TARGET, SLOT_DURATION_MILLIS, }, - core::{ - configuration::{TestAuthorities, TestConfiguration}, - mock::runtime_api::session_info_for_peers, - NODE_UNDER_TEST, - }, - ApprovalsOptions, TestObjective, + configuration::{TestAuthorities, TestConfiguration}, + mock::runtime_api::session_info_for_peers, + NODE_UNDER_TEST, }; use futures::SinkExt; use itertools::Itertools; @@ -132,11 +129,7 @@ impl PeerMessagesGenerator { options: &ApprovalsOptions, ) -> String { let mut fingerprint = options.fingerprint(); - let mut exclude_objective = configuration.clone(); - // The objective contains the full content of `ApprovalOptions`, we don't want to put all of - // that in fingerprint, so execlute it because we add it manually see above. 
- exclude_objective.objective = TestObjective::Unimplemented; - let configuration_bytes = bincode::serialize(&exclude_objective).unwrap(); + let configuration_bytes = bincode::serialize(&configuration).unwrap(); fingerprint.extend(configuration_bytes); let mut sha1 = sha1::Sha1::new(); sha1.update(fingerprint); diff --git a/polkadot/node/subsystem-bench/src/approval/mock_chain_selection.rs b/polkadot/node/subsystem-bench/src/lib/approval/mock_chain_selection.rs similarity index 100% rename from polkadot/node/subsystem-bench/src/approval/mock_chain_selection.rs rename to polkadot/node/subsystem-bench/src/lib/approval/mock_chain_selection.rs diff --git a/polkadot/node/subsystem-bench/src/approval/mod.rs b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs similarity index 97% rename from polkadot/node/subsystem-bench/src/approval/mod.rs rename to polkadot/node/subsystem-bench/src/lib/approval/mod.rs index f07912de1887580a58770b9bf9e76624c60bdcc4..450faf06123f61db9ee91d1d935721d3c128e701 100644 --- a/polkadot/node/subsystem-bench/src/approval/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs @@ -24,25 +24,21 @@ use crate::{ mock_chain_selection::MockChainSelection, test_message::{MessagesBundle, TestMessageInfo}, }, - core::{ - configuration::TestAuthorities, - environment::{ - BenchmarkUsage, TestEnvironment, TestEnvironmentDependencies, MAX_TIME_OF_FLIGHT, - }, - mock::{ - chain_api::{ChainApiState, MockChainApi}, - dummy_builder, - network_bridge::{MockNetworkBridgeRx, MockNetworkBridgeTx}, - runtime_api::MockRuntimeApi, - AlwaysSupportsParachains, TestSyncOracle, - }, - network::{ - new_network, HandleNetworkMessage, NetworkEmulatorHandle, NetworkInterface, - NetworkInterfaceReceiver, - }, - NODE_UNDER_TEST, + configuration::{TestAuthorities, TestConfiguration}, + dummy_builder, + environment::{TestEnvironment, TestEnvironmentDependencies, MAX_TIME_OF_FLIGHT}, + mock::{ + chain_api::{ChainApiState, MockChainApi}, + network_bridge::{MockNetworkBridgeRx, MockNetworkBridgeTx}, + runtime_api::MockRuntimeApi, + AlwaysSupportsParachains, TestSyncOracle, }, - TestConfiguration, + network::{ + new_network, HandleNetworkMessage, NetworkEmulatorHandle, NetworkInterface, + NetworkInterfaceReceiver, + }, + usage::BenchmarkUsage, + NODE_UNDER_TEST, }; use colored::Colorize; use futures::channel::oneshot; @@ -472,11 +468,9 @@ impl ApprovalTestState { impl HandleNetworkMessage for ApprovalTestState { fn handle( &self, - _message: crate::core::network::NetworkMessage, - _node_sender: &mut futures::channel::mpsc::UnboundedSender< - crate::core::network::NetworkMessage, - >, - ) -> Option { + _message: crate::network::NetworkMessage, + _node_sender: &mut futures::channel::mpsc::UnboundedSender, + ) -> Option { self.total_sent_messages_from_node .as_ref() .fetch_add(1, std::sync::atomic::Ordering::SeqCst); @@ -841,8 +835,14 @@ fn build_overseer( pub fn prepare_test( config: TestConfiguration, options: ApprovalsOptions, + with_prometheus_endpoint: bool, ) -> (TestEnvironment, ApprovalTestState) { - prepare_test_inner(config, TestEnvironmentDependencies::default(), options) + prepare_test_inner( + config, + TestEnvironmentDependencies::default(), + options, + with_prometheus_endpoint, + ) } /// Build the test environment for an Approval benchmark. 
@@ -850,6 +850,7 @@ fn prepare_test_inner( config: TestConfiguration, dependencies: TestEnvironmentDependencies, options: ApprovalsOptions, + with_prometheus_endpoint: bool, ) -> (TestEnvironment, ApprovalTestState) { gum::info!("Prepare test state"); let state = ApprovalTestState::new(&config, options, &dependencies); @@ -878,6 +879,7 @@ fn prepare_test_inner( overseer, overseer_handle, state.test_authorities.clone(), + with_prometheus_endpoint, ), state, ) diff --git a/polkadot/node/subsystem-bench/src/approval/test_message.rs b/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs similarity index 98% rename from polkadot/node/subsystem-bench/src/approval/test_message.rs rename to polkadot/node/subsystem-bench/src/lib/approval/test_message.rs index 8aaabc3426c803563bdb2ed8455eed8a0061484e..63e383509be9ff6da6e1d5dd61baebc99c574bc4 100644 --- a/polkadot/node/subsystem-bench/src/approval/test_message.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs @@ -15,9 +15,8 @@ // along with Polkadot. If not, see . use crate::{ - approval::{BlockTestData, CandidateTestData}, - core::configuration::TestAuthorities, - ApprovalsOptions, + approval::{ApprovalsOptions, BlockTestData, CandidateTestData}, + configuration::TestAuthorities, }; use itertools::Itertools; use parity_scale_codec::{Decode, Encode}; diff --git a/polkadot/node/subsystem-bench/src/availability/av_store_helpers.rs b/polkadot/node/subsystem-bench/src/lib/availability/av_store_helpers.rs similarity index 94% rename from polkadot/node/subsystem-bench/src/availability/av_store_helpers.rs rename to polkadot/node/subsystem-bench/src/lib/availability/av_store_helpers.rs index 261dbd0376c732fb4688e6251f32e21d3ec8d4d4..3300def2235ee0ea90f466316f9bc5a7a965869c 100644 --- a/polkadot/node/subsystem-bench/src/availability/av_store_helpers.rs +++ b/polkadot/node/subsystem-bench/src/lib/availability/av_store_helpers.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use crate::core::{environment::TestEnvironmentDependencies, mock::TestSyncOracle}; +use crate::{environment::TestEnvironmentDependencies, mock::TestSyncOracle}; use polkadot_node_core_av_store::{AvailabilityStoreSubsystem, Config}; use polkadot_node_metrics::metrics::Metrics; use polkadot_node_subsystem_util::database::Database; diff --git a/polkadot/node/subsystem-bench/src/availability/mod.rs b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs similarity index 94% rename from polkadot/node/subsystem-bench/src/availability/mod.rs rename to polkadot/node/subsystem-bench/src/lib/availability/mod.rs index ad9a17ff8f47a1a3dc7e8e8e990cb0b1f7f262f6..f012a5a907edac40ea8a2b9d8c8ebd1a1dc4b7e1 100644 --- a/polkadot/node/subsystem-bench/src/availability/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs @@ -15,22 +15,18 @@ // along with Polkadot. If not, see . 
use crate::{ - core::{ - configuration::TestConfiguration, - environment::{BenchmarkUsage, TestEnvironmentDependencies}, - mock::{ - av_store, - av_store::MockAvailabilityStore, - chain_api::{ChainApiState, MockChainApi}, - dummy_builder, - network_bridge::{self, MockNetworkBridgeRx, MockNetworkBridgeTx}, - runtime_api, - runtime_api::MockRuntimeApi, - AlwaysSupportsParachains, - }, - network::new_network, + configuration::TestConfiguration, + dummy_builder, + environment::{TestEnvironment, TestEnvironmentDependencies, GENESIS_HASH}, + mock::{ + av_store::{self, MockAvailabilityStore}, + chain_api::{ChainApiState, MockChainApi}, + network_bridge::{self, MockNetworkBridgeRx, MockNetworkBridgeTx}, + runtime_api::{self, MockRuntimeApi}, + AlwaysSupportsParachains, }, - TestEnvironment, TestObjective, GENESIS_HASH, + network::new_network, + usage::BenchmarkUsage, }; use av_store::NetworkAvailabilityState; use av_store_helpers::new_av_store; @@ -73,14 +69,30 @@ use sc_network::{ PeerId, }; use sc_service::SpawnTaskHandle; +use serde::{Deserialize, Serialize}; use sp_core::H256; use std::{collections::HashMap, iter::Cycle, ops::Sub, sync::Arc, time::Instant}; mod av_store_helpers; -pub(crate) mod cli; const LOG_TARGET: &str = "subsystem-bench::availability"; +#[derive(Debug, Clone, Serialize, Deserialize, clap::Parser)] +#[clap(rename_all = "kebab-case")] +#[allow(missing_docs)] +pub struct DataAvailabilityReadOptions { + #[clap(short, long, default_value_t = false)] + /// Turbo boost AD Read by fetching the full availability datafrom backers first. Saves CPU as + /// we don't need to re-construct from chunks. Tipically this is only faster if nodes have + /// enough bandwidth. + pub fetch_from_backers: bool, +} + +pub enum TestDataAvailability { + Read(DataAvailabilityReadOptions), + Write, +} + fn build_overseer_for_availability_read( spawn_task_handle: SpawnTaskHandle, runtime_api: MockRuntimeApi, @@ -141,14 +153,24 @@ fn build_overseer_for_availability_write( pub fn prepare_test( config: TestConfiguration, state: &mut TestState, + mode: TestDataAvailability, + with_prometheus_endpoint: bool, ) -> (TestEnvironment, Vec) { - prepare_test_inner(config, state, TestEnvironmentDependencies::default()) + prepare_test_inner( + config, + state, + mode, + TestEnvironmentDependencies::default(), + with_prometheus_endpoint, + ) } fn prepare_test_inner( config: TestConfiguration, state: &mut TestState, + mode: TestDataAvailability, dependencies: TestEnvironmentDependencies, + with_prometheus_endpoint: bool, ) -> (TestEnvironment, Vec) { // Generate test authorities. 
let test_authorities = config.generate_authorities(); @@ -216,8 +238,8 @@ fn prepare_test_inner( let network_bridge_rx = network_bridge::MockNetworkBridgeRx::new(network_receiver, Some(chunk_req_cfg.clone())); - let (overseer, overseer_handle) = match &state.config().objective { - TestObjective::DataAvailabilityRead(options) => { + let (overseer, overseer_handle) = match &mode { + TestDataAvailability::Read(options) => { let use_fast_path = options.fetch_from_backers; let subsystem = if use_fast_path { @@ -247,7 +269,7 @@ fn prepare_test_inner( &dependencies, ) }, - TestObjective::DataAvailabilityWrite => { + TestDataAvailability::Write => { let availability_distribution = AvailabilityDistributionSubsystem::new( test_authorities.keyring.keystore(), IncomingRequestReceivers { pov_req_receiver, chunk_req_receiver }, @@ -284,9 +306,6 @@ fn prepare_test_inner( &dependencies, ) }, - _ => { - unimplemented!("Invalid test objective") - }, }; ( @@ -297,6 +316,7 @@ fn prepare_test_inner( overseer, overseer_handle, test_authorities, + with_prometheus_endpoint, ), req_cfgs, ) @@ -326,10 +346,6 @@ pub struct TestState { } impl TestState { - fn config(&self) -> &TestConfiguration { - &self.config - } - pub fn next_candidate(&mut self) -> Option { let candidate = self.candidates.next(); let candidate_hash = candidate.as_ref().unwrap().hash(); diff --git a/polkadot/node/subsystem-bench/src/core/configuration.rs b/polkadot/node/subsystem-bench/src/lib/configuration.rs similarity index 83% rename from polkadot/node/subsystem-bench/src/core/configuration.rs rename to polkadot/node/subsystem-bench/src/lib/configuration.rs index 00be2a86b173a61465d7610d75df1abbe09363f0..c76933085271aa8a3fea01a1bab137d77b104fed 100644 --- a/polkadot/node/subsystem-bench/src/core/configuration.rs +++ b/polkadot/node/subsystem-bench/src/lib/configuration.rs @@ -16,7 +16,7 @@ //! Test configuration definition and helpers. -use crate::{core::keyring::Keyring, TestObjective}; +use crate::keyring::Keyring; use itertools::Itertools; use polkadot_primitives::{AssignmentId, AuthorityDiscoveryId, ValidatorId}; use rand::thread_rng; @@ -24,17 +24,7 @@ use rand_distr::{Distribution, Normal, Uniform}; use sc_network::PeerId; use serde::{Deserialize, Serialize}; use sp_consensus_babe::AuthorityId; -use std::{collections::HashMap, path::Path}; - -pub fn random_pov_size(min_pov_size: usize, max_pov_size: usize) -> usize { - random_uniform_sample(min_pov_size, max_pov_size) -} - -fn random_uniform_sample + From>(min_value: T, max_value: T) -> T { - Uniform::from(min_value.into()..=max_value.into()) - .sample(&mut thread_rng()) - .into() -} +use std::collections::HashMap; /// Peer networking latency configuration. #[derive(Clone, Debug, Default, Serialize, Deserialize)] @@ -87,8 +77,6 @@ fn default_no_show_slots() -> usize { /// The test input parameters #[derive(Clone, Debug, Serialize, Deserialize)] pub struct TestConfiguration { - /// The test objective - pub objective: TestObjective, /// Number of validators pub n_validators: usize, /// Number of cores @@ -115,7 +103,7 @@ pub struct TestConfiguration { pub max_pov_size: usize, /// Randomly sampled pov_sizes #[serde(skip)] - pov_sizes: Vec, + pub pov_sizes: Vec, /// The amount of bandiwdth remote validators have. 
#[serde(default = "default_bandwidth")] pub peer_bandwidth: usize, @@ -133,56 +121,32 @@ pub struct TestConfiguration { pub num_blocks: usize, } -fn generate_pov_sizes(count: usize, min_kib: usize, max_kib: usize) -> Vec { - (0..count).map(|_| random_pov_size(min_kib * 1024, max_kib * 1024)).collect() -} - -#[derive(Serialize, Deserialize)] -pub struct TestSequence { - #[serde(rename(serialize = "TestConfiguration", deserialize = "TestConfiguration"))] - test_configurations: Vec, -} - -impl TestSequence { - pub fn into_vec(self) -> Vec { - self.test_configurations - .into_iter() - .map(|mut config| { - config.pov_sizes = - generate_pov_sizes(config.n_cores, config.min_pov_size, config.max_pov_size); - config - }) - .collect() - } -} - -impl TestSequence { - pub fn new_from_file(path: &Path) -> std::io::Result { - let string = String::from_utf8(std::fs::read(path)?).expect("File is valid UTF8"); - Ok(serde_yaml::from_str(&string).expect("File is valid test sequence YA")) +impl Default for TestConfiguration { + fn default() -> Self { + Self { + n_validators: Default::default(), + n_cores: Default::default(), + needed_approvals: default_needed_approvals(), + zeroth_delay_tranche_width: default_zeroth_delay_tranche_width(), + relay_vrf_modulo_samples: default_relay_vrf_modulo_samples(), + n_delay_tranches: default_n_delay_tranches(), + no_show_slots: default_no_show_slots(), + max_validators_per_core: default_backing_group_size(), + min_pov_size: default_pov_size(), + max_pov_size: default_pov_size(), + pov_sizes: Default::default(), + peer_bandwidth: default_bandwidth(), + bandwidth: default_bandwidth(), + latency: Default::default(), + connectivity: default_connectivity(), + num_blocks: Default::default(), + } } } -/// Helper struct for authority related state. -#[derive(Clone)] -pub struct TestAuthorities { - pub keyring: Keyring, - pub validator_public: Vec, - pub validator_authority_id: Vec, - pub validator_babe_id: Vec, - pub validator_assignment_id: Vec, - pub key_seeds: Vec, - pub peer_ids: Vec, - pub peer_id_to_authority: HashMap, -} - impl TestConfiguration { - #[allow(unused)] - pub fn write_to_disk(&self) { - // Serialize a slice of configurations - let yaml = serde_yaml::to_string(&TestSequence { test_configurations: vec![self.clone()] }) - .unwrap(); - std::fs::write("last_test.yaml", yaml).unwrap(); + pub fn generate_pov_sizes(&mut self) { + self.pov_sizes = generate_pov_sizes(self.n_cores, self.min_pov_size, self.max_pov_size); } pub fn pov_sizes(&self) -> &[usize] { @@ -239,6 +203,33 @@ impl TestConfiguration { } } +fn random_uniform_sample + From>(min_value: T, max_value: T) -> T { + Uniform::from(min_value.into()..=max_value.into()) + .sample(&mut thread_rng()) + .into() +} + +fn random_pov_size(min_pov_size: usize, max_pov_size: usize) -> usize { + random_uniform_sample(min_pov_size, max_pov_size) +} + +fn generate_pov_sizes(count: usize, min_kib: usize, max_kib: usize) -> Vec { + (0..count).map(|_| random_pov_size(min_kib * 1024, max_kib * 1024)).collect() +} + +/// Helper struct for authority related state. +#[derive(Clone)] +pub struct TestAuthorities { + pub keyring: Keyring, + pub validator_public: Vec, + pub validator_authority_id: Vec, + pub validator_babe_id: Vec, + pub validator_assignment_id: Vec, + pub key_seeds: Vec, + pub peer_ids: Vec, + pub peer_id_to_authority: HashMap, +} + /// Sample latency (in milliseconds) from a normal distribution with parameters /// specified in `maybe_peer_latency`. 
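To illustrate the reworked `TestConfiguration` API above (the new `Default` impl plus the public `generate_pov_sizes` step that replaces the old `TestSequence::into_vec` sampling), here is a small usage sketch, assuming the `polkadot-subsystem-bench` library crate introduced in this diff; values are illustrative:

```rust
use polkadot_subsystem_bench::configuration::TestConfiguration;

// Sketch: build a configuration in code instead of loading it from YAML.
fn example_config() -> TestConfiguration {
    let mut config = TestConfiguration {
        n_validators: 500,
        n_cores: 100,
        min_pov_size: 1120, // KiB
        max_pov_size: 5120, // KiB
        num_blocks: 3,
        ..Default::default()
    };
    // PoV sizes are no longer sampled during deserialization; callers do this
    // explicitly, as the CLI now does right after reading the test sequence.
    config.generate_pov_sizes();
    config
}
```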
pub fn random_latency(maybe_peer_latency: Option<&PeerLatency>) -> usize { diff --git a/polkadot/node/subsystem-bench/src/core/display.rs b/polkadot/node/subsystem-bench/src/lib/display.rs similarity index 89% rename from polkadot/node/subsystem-bench/src/core/display.rs rename to polkadot/node/subsystem-bench/src/lib/display.rs index 13a349382e2fd776f41685fb133b96f1058e7b43..b153d54a7c36f43bc110dde9443f2413a34d9af6 100644 --- a/polkadot/node/subsystem-bench/src/core/display.rs +++ b/polkadot/node/subsystem-bench/src/lib/display.rs @@ -19,7 +19,7 @@ //! //! Currently histogram buckets are skipped. -use crate::{TestConfiguration, LOG_TARGET}; +use crate::configuration::TestConfiguration; use colored::Colorize; use prometheus::{ proto::{MetricFamily, MetricType}, @@ -27,6 +27,8 @@ use prometheus::{ }; use std::fmt::Display; +const LOG_TARGET: &str = "subsystem-bench::display"; + #[derive(Default, Debug)] pub struct MetricCollection(Vec); @@ -85,6 +87,7 @@ impl Display for MetricCollection { Ok(()) } } + #[derive(Debug, Clone)] pub struct TestMetric { name: String, @@ -184,15 +187,16 @@ pub fn parse_metrics(registry: &Registry) -> MetricCollection { test_metrics.into() } -pub fn display_configuration(test_config: &TestConfiguration) { - gum::info!( - "[{}] {}, {}, {}, {}, {}", - format!("objective = {:?}", test_config.objective).green(), - format!("n_validators = {}", test_config.n_validators).blue(), - format!("n_cores = {}", test_config.n_cores).blue(), - format!("pov_size = {} - {}", test_config.min_pov_size, test_config.max_pov_size) - .bright_black(), - format!("connectivity = {}", test_config.connectivity).bright_black(), - format!("latency = {:?}", test_config.latency).bright_black(), - ); +impl Display for TestConfiguration { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}, {}, {}, {}, {}", + format!("n_validators = {}", self.n_validators).blue(), + format!("n_cores = {}", self.n_cores).blue(), + format!("pov_size = {} - {}", self.min_pov_size, self.max_pov_size).bright_black(), + format!("connectivity = {}", self.connectivity).bright_black(), + format!("latency = {:?}", self.latency).bright_black(), + ) + } } diff --git a/polkadot/node/subsystem-bench/src/core/environment.rs b/polkadot/node/subsystem-bench/src/lib/environment.rs similarity index 88% rename from polkadot/node/subsystem-bench/src/core/environment.rs rename to polkadot/node/subsystem-bench/src/lib/environment.rs index ca4c41cf45f99851023e1c6899b3d1f5e7913eb8..958ed50d0894b76ddb66978950971620dceb4aa0 100644 --- a/polkadot/node/subsystem-bench/src/core/environment.rs +++ b/polkadot/node/subsystem-bench/src/lib/environment.rs @@ -17,13 +17,11 @@ //! 
Test environment implementation use crate::{ - core::{ - configuration::TestAuthorities, mock::AlwaysSupportsParachains, - network::NetworkEmulatorHandle, - }, - TestConfiguration, + configuration::{TestAuthorities, TestConfiguration}, + mock::AlwaysSupportsParachains, + network::NetworkEmulatorHandle, + usage::{BenchmarkUsage, ResourceUsage}, }; -use colored::Colorize; use core::time::Duration; use futures::{Future, FutureExt}; use polkadot_node_subsystem::{messages::AllMessages, Overseer, SpawnGlue, TimeoutExt}; @@ -33,7 +31,6 @@ use polkadot_node_subsystem_util::metrics::prometheus::{ }; use polkadot_overseer::{BlockInfo, Handle as OverseerHandle}; use sc_service::{SpawnTaskHandle, TaskManager}; -use serde::{Deserialize, Serialize}; use std::net::{Ipv4Addr, SocketAddr}; use tokio::runtime::Handle; @@ -204,6 +201,7 @@ impl TestEnvironment { overseer: Overseer, AlwaysSupportsParachains>, overseer_handle: OverseerHandle, authorities: TestAuthorities, + with_prometheus_endpoint: bool, ) -> Self { let metrics = TestEnvironmentMetrics::new(&dependencies.registry) .expect("Metrics need to be registered"); @@ -211,19 +209,21 @@ impl TestEnvironment { let spawn_handle = dependencies.task_manager.spawn_handle(); spawn_handle.spawn_blocking("overseer", "overseer", overseer.run().boxed()); - let registry_clone = dependencies.registry.clone(); - dependencies.task_manager.spawn_handle().spawn_blocking( - "prometheus", - "test-environment", - async move { - prometheus_endpoint::init_prometheus( - SocketAddr::new(std::net::IpAddr::V4(Ipv4Addr::LOCALHOST), 9999), - registry_clone, - ) - .await - .unwrap(); - }, - ); + if with_prometheus_endpoint { + let registry_clone = dependencies.registry.clone(); + dependencies.task_manager.spawn_handle().spawn_blocking( + "prometheus", + "test-environment", + async move { + prometheus_endpoint::init_prometheus( + SocketAddr::new(std::net::IpAddr::V4(Ipv4Addr::LOCALHOST), 9999), + registry_clone, + ) + .await + .unwrap(); + }, + ); + } TestEnvironment { runtime_handle: dependencies.runtime.handle().clone(), @@ -411,41 +411,3 @@ impl TestEnvironment { usage } } - -#[derive(Debug, Serialize, Deserialize)] -pub struct BenchmarkUsage { - benchmark_name: String, - network_usage: Vec, - cpu_usage: Vec, -} - -impl std::fmt::Display for BenchmarkUsage { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!( - f, - "\n{}\n\n{}\n{}\n\n{}\n{}\n", - self.benchmark_name.purple(), - format!("{:<32}{:>12}{:>12}", "Network usage, KiB", "total", "per block").blue(), - self.network_usage - .iter() - .map(|v| v.to_string()) - .collect::>() - .join("\n"), - format!("{:<32}{:>12}{:>12}", "CPU usage in seconds", "total", "per block").blue(), - self.cpu_usage.iter().map(|v| v.to_string()).collect::>().join("\n") - ) - } -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct ResourceUsage { - resource_name: String, - total: f64, - per_block: f64, -} - -impl std::fmt::Display for ResourceUsage { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{:<32}{:>12.3}{:>12.3}", self.resource_name.cyan(), self.total, self.per_block) - } -} diff --git a/polkadot/node/subsystem-bench/src/core/keyring.rs b/polkadot/node/subsystem-bench/src/lib/keyring.rs similarity index 100% rename from polkadot/node/subsystem-bench/src/core/keyring.rs rename to polkadot/node/subsystem-bench/src/lib/keyring.rs diff --git a/polkadot/node/subsystem-bench/src/core/mod.rs b/polkadot/node/subsystem-bench/src/lib/lib.rs similarity index 88% rename from 
polkadot/node/subsystem-bench/src/core/mod.rs rename to polkadot/node/subsystem-bench/src/lib/lib.rs index 764184c5b37779efb7dc22dead338cc05ee9b942..d06f2822a8958169a15f449e71251e3cc62eb9d6 100644 --- a/polkadot/node/subsystem-bench/src/core/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/lib.rs @@ -15,11 +15,14 @@ // along with Polkadot. If not, see . // The validator index that represent the node that is under test. -pub(crate) const NODE_UNDER_TEST: u32 = 0; +pub const NODE_UNDER_TEST: u32 = 0; -pub(crate) mod configuration; +pub mod approval; +pub mod availability; +pub mod configuration; pub(crate) mod display; pub(crate) mod environment; pub(crate) mod keyring; pub(crate) mod mock; pub(crate) mod network; +pub mod usage; diff --git a/polkadot/node/subsystem-bench/src/core/mock/av_store.rs b/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs similarity index 99% rename from polkadot/node/subsystem-bench/src/core/mock/av_store.rs rename to polkadot/node/subsystem-bench/src/lib/mock/av_store.rs index 0a7725c91e0421e79a20b2ec77d8ade72ab05714..41c4fe2cbadc0d71e5130ba12f048c0328231e82 100644 --- a/polkadot/node/subsystem-bench/src/core/mock/av_store.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs @@ -16,7 +16,7 @@ //! A generic av store subsystem mockup suitable to be used in benchmarks. -use crate::core::network::{HandleNetworkMessage, NetworkMessage}; +use crate::network::{HandleNetworkMessage, NetworkMessage}; use futures::{channel::oneshot, FutureExt}; use parity_scale_codec::Encode; use polkadot_node_network_protocol::request_response::{ diff --git a/polkadot/node/subsystem-bench/src/core/mock/chain_api.rs b/polkadot/node/subsystem-bench/src/lib/mock/chain_api.rs similarity index 100% rename from polkadot/node/subsystem-bench/src/core/mock/chain_api.rs rename to polkadot/node/subsystem-bench/src/lib/mock/chain_api.rs diff --git a/polkadot/node/subsystem-bench/src/core/mock/dummy.rs b/polkadot/node/subsystem-bench/src/lib/mock/dummy.rs similarity index 100% rename from polkadot/node/subsystem-bench/src/core/mock/dummy.rs rename to polkadot/node/subsystem-bench/src/lib/mock/dummy.rs diff --git a/polkadot/node/subsystem-bench/src/core/mock/mod.rs b/polkadot/node/subsystem-bench/src/lib/mock/mod.rs similarity index 97% rename from polkadot/node/subsystem-bench/src/core/mock/mod.rs rename to polkadot/node/subsystem-bench/src/lib/mock/mod.rs index 46fdeb196c011587dd081851e9e1f31863a0530e..6dda9a47d398f3e6e952cd054bce9769e8942e70 100644 --- a/polkadot/node/subsystem-bench/src/core/mock/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/mod.rs @@ -34,9 +34,10 @@ impl HeadSupportsParachains for AlwaysSupportsParachains { } // An orchestra with dummy subsystems +#[macro_export] macro_rules! dummy_builder { ($spawn_task_handle: ident, $metrics: ident) => {{ - use $crate::core::mock::dummy::*; + use $crate::mock::dummy::*; // Initialize a mock overseer. // All subsystem except approval_voting and approval_distribution are mock subsystems. @@ -72,7 +73,6 @@ macro_rules! 
dummy_builder { .spawner(SpawnGlue($spawn_task_handle)) }}; } -pub(crate) use dummy_builder; #[derive(Clone)] pub struct TestSyncOracle {} diff --git a/polkadot/node/subsystem-bench/src/core/mock/network_bridge.rs b/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs similarity index 99% rename from polkadot/node/subsystem-bench/src/core/mock/network_bridge.rs rename to polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs index 4682c7ec79ae3532858365b135636afefcb5400f..d598f6447d3d43ece5e1d0fa6364508403c032e9 100644 --- a/polkadot/node/subsystem-bench/src/core/mock/network_bridge.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs @@ -17,7 +17,7 @@ //! Mocked `network-bridge` subsystems that uses a `NetworkInterface` to access //! the emulated network. -use crate::core::{ +use crate::{ configuration::TestAuthorities, network::{NetworkEmulatorHandle, NetworkInterfaceReceiver, NetworkMessage, RequestExt}, }; diff --git a/polkadot/node/subsystem-bench/src/core/mock/runtime_api.rs b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs similarity index 99% rename from polkadot/node/subsystem-bench/src/core/mock/runtime_api.rs rename to polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs index 0dd76efcbaf0de5cdbd456d0bff91658ad0ac737..53faf562f03c005238d5297c29117fd261b49eca 100644 --- a/polkadot/node/subsystem-bench/src/core/mock/runtime_api.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs @@ -16,7 +16,7 @@ //! A generic runtime api subsystem mockup suitable to be used in benchmarks. -use crate::core::configuration::{TestAuthorities, TestConfiguration}; +use crate::configuration::{TestAuthorities, TestConfiguration}; use bitvec::prelude::BitVec; use futures::FutureExt; use itertools::Itertools; diff --git a/polkadot/node/subsystem-bench/src/core/network.rs b/polkadot/node/subsystem-bench/src/lib/network.rs similarity index 99% rename from polkadot/node/subsystem-bench/src/core/network.rs rename to polkadot/node/subsystem-bench/src/lib/network.rs index e9124726d7c063f15eaf7b048b291b1b2115e897..1e09441792d59ee79f14297b34375ef7452c935e 100644 --- a/polkadot/node/subsystem-bench/src/core/network.rs +++ b/polkadot/node/subsystem-bench/src/lib/network.rs @@ -33,7 +33,7 @@ // | // Subsystems under test -use crate::core::{ +use crate::{ configuration::{random_latency, TestAuthorities, TestConfiguration}, environment::TestEnvironmentDependencies, NODE_UNDER_TEST, diff --git a/polkadot/node/subsystem-bench/src/lib/usage.rs b/polkadot/node/subsystem-bench/src/lib/usage.rs new file mode 100644 index 0000000000000000000000000000000000000000..b83ef7d98d918e2c398f9f4b28558b1b93851051 --- /dev/null +++ b/polkadot/node/subsystem-bench/src/lib/usage.rs @@ -0,0 +1,146 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! 
Test usage implementation + +use colored::Colorize; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; + +#[derive(Debug, Serialize, Deserialize)] +pub struct BenchmarkUsage { + pub benchmark_name: String, + pub network_usage: Vec, + pub cpu_usage: Vec, +} + +impl std::fmt::Display for BenchmarkUsage { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!( + f, + "\n{}\n\n{}\n{}\n\n{}\n{}\n", + self.benchmark_name.purple(), + format!("{:<32}{:>12}{:>12}", "Network usage, KiB", "total", "per block").blue(), + self.network_usage + .iter() + .map(|v| v.to_string()) + .collect::>() + .join("\n"), + format!("{:<32}{:>12}{:>12}", "CPU usage, seconds", "total", "per block").blue(), + self.cpu_usage.iter().map(|v| v.to_string()).collect::>().join("\n") + ) + } +} + +impl BenchmarkUsage { + pub fn average(usages: &[Self]) -> Self { + let all_network_usages: Vec<&ResourceUsage> = + usages.iter().flat_map(|v| &v.network_usage).collect(); + let all_cpu_usage: Vec<&ResourceUsage> = usages.iter().flat_map(|v| &v.cpu_usage).collect(); + + Self { + benchmark_name: usages.first().map(|v| v.benchmark_name.clone()).unwrap_or_default(), + network_usage: ResourceUsage::average_by_resource_name(&all_network_usages), + cpu_usage: ResourceUsage::average_by_resource_name(&all_cpu_usage), + } + } + + pub fn check_network_usage(&self, checks: &[ResourceUsageCheck]) -> Vec { + check_usage(&self.benchmark_name, &self.network_usage, checks) + } + + pub fn check_cpu_usage(&self, checks: &[ResourceUsageCheck]) -> Vec { + check_usage(&self.benchmark_name, &self.cpu_usage, checks) + } + + pub fn cpu_usage_diff(&self, other: &Self, resource_name: &str) -> Option { + let self_res = self.cpu_usage.iter().find(|v| v.resource_name == resource_name); + let other_res = other.cpu_usage.iter().find(|v| v.resource_name == resource_name); + + match (self_res, other_res) { + (Some(self_res), Some(other_res)) => Some(self_res.diff(other_res)), + _ => None, + } + } +} + +fn check_usage( + benchmark_name: &str, + usage: &[ResourceUsage], + checks: &[ResourceUsageCheck], +) -> Vec { + checks + .iter() + .filter_map(|check| { + check_resource_usage(usage, check) + .map(|message| format!("{}: {}", benchmark_name, message)) + }) + .collect() +} + +fn check_resource_usage( + usage: &[ResourceUsage], + (resource_name, base, precision): &ResourceUsageCheck, +) -> Option { + if let Some(usage) = usage.iter().find(|v| v.resource_name == *resource_name) { + let diff = (base - usage.per_block).abs() / base; + if diff < *precision { + None + } else { + Some(format!( + "The resource `{}` is expected to be equal to {} with a precision {}, but the current value is {}", + resource_name, base, precision, usage.per_block + )) + } + } else { + Some(format!("The resource `{}` is not found", resource_name)) + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct ResourceUsage { + pub resource_name: String, + pub total: f64, + pub per_block: f64, +} + +impl std::fmt::Display for ResourceUsage { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{:<32}{:>12.3}{:>12.3}", self.resource_name.cyan(), self.total, self.per_block) + } +} + +impl ResourceUsage { + fn average_by_resource_name(usages: &[&Self]) -> Vec { + let mut by_name: HashMap> = Default::default(); + for usage in usages { + by_name.entry(usage.resource_name.clone()).or_default().push(usage); + } + let mut average = vec![]; + for (resource_name, values) in by_name { + let total = values.iter().map(|v| v.total).sum::() / 
values.len() as f64; + let per_block = values.iter().map(|v| v.per_block).sum::() / values.len() as f64; + average.push(Self { resource_name, total, per_block }); + } + average + } + + fn diff(&self, other: &Self) -> f64 { + (self.per_block - other.per_block).abs() / self.per_block + } +} + +type ResourceUsageCheck<'a> = (&'a str, f64, f64); diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 549e43a671d6b4f9bd59659647cdaff8a69125d4..f11291fd0ea8c157d72a24349e94196178529446 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -1112,6 +1112,9 @@ pub struct ProspectiveValidationDataRequest { /// is present in and the depths of that tree the candidate is present in. pub type FragmentTreeMembership = Vec<(Hash, Vec)>; +/// A collection of ancestor candidates of a parachain. +pub type Ancestors = HashSet; + /// Messages sent to the Prospective Parachains subsystem. #[derive(Debug)] pub enum ProspectiveParachainsMessage { @@ -1128,15 +1131,18 @@ pub enum ProspectiveParachainsMessage { /// has been backed. This requires that the candidate was successfully introduced in /// the past. CandidateBacked(ParaId, CandidateHash), - /// Get N backable candidate hashes along with their relay parents for the given parachain, - /// under the given relay-parent hash, which is a descendant of the given candidate hashes. + /// Try getting N backable candidate hashes along with their relay parents for the given + /// parachain, under the given relay-parent hash, which is a descendant of the given ancestors. + /// Timed out ancestors should not be included in the collection. /// N should represent the number of scheduled cores of this ParaId. - /// Returns `None` on the channel if no such candidate exists. + /// A timed out ancestor frees the cores of all of its descendants, so if there's a hole in the + /// supplied ancestor path, we'll get candidates that backfill those timed out slots first. It + /// may also return less/no candidates, if there aren't enough backable candidates recorded. GetBackableCandidates( Hash, ParaId, u32, - Vec, + Ancestors, oneshot::Sender>, ), /// Get the hypothetical frontier membership of candidates with the given properties diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs index 7f6183076101b4474e8059435b42a69b108fbb05..4039fc9127da95e19e0aca427740793c39131ab9 100644 --- a/polkadot/node/subsystem-types/src/runtime_client.rs +++ b/polkadot/node/subsystem-types/src/runtime_client.rs @@ -26,13 +26,13 @@ use polkadot_primitives::{ PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; -use sc_client_api::HeaderBackend; +use sc_client_api::{AuxStore, HeaderBackend}; use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_api::{ApiError, ApiExt, ProvideRuntimeApi}; use sp_authority_discovery::AuthorityDiscoveryApi; -use sp_blockchain::Info; +use sp_blockchain::{BlockStatus, Info}; use sp_consensus_babe::{BabeApi, Epoch}; -use sp_runtime::traits::{Header as HeaderT, NumberFor}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use std::{collections::BTreeMap, sync::Arc}; /// Offers header utilities. 
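For clarity, the tolerance test performed by `check_resource_usage` above reduces to a relative-deviation check per resource: a check is a `(resource_name, expected_per_block, precision)` tuple and passes while the measured per-block value stays within `precision` of the expected value. A minimal sketch with hypothetical numbers:

```rust
/// A check is `(resource_name, expected_per_block, precision)`.
type ResourceUsageCheck<'a> = (&'a str, f64, f64);

/// Passes when the relative deviation from the expected value stays below `precision`.
fn within_tolerance(check: &ResourceUsageCheck, per_block: f64) -> bool {
    let (_, base, precision) = *check;
    (base - per_block).abs() / base < precision
}

fn main() {
    // Hypothetical numbers: expect ~0.025 CPU seconds per block, allow 5% deviation.
    let check: ResourceUsageCheck = ("availability-distribution", 0.025, 0.05);
    assert!(within_tolerance(&check, 0.0255)); // 2% off: passes
    assert!(!within_tolerance(&check, 0.030)); // 20% off: reported as a failure
}
```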
@@ -595,3 +595,61 @@ where self.client.runtime_api().approval_voting_params(at) } } + +impl HeaderBackend for DefaultSubsystemClient +where + Client: HeaderBackend, + Block: sp_runtime::traits::Block, +{ + fn header( + &self, + hash: Block::Hash, + ) -> sc_client_api::blockchain::Result> { + self.client.header(hash) + } + + fn info(&self) -> Info { + self.client.info() + } + + fn status(&self, hash: Block::Hash) -> sc_client_api::blockchain::Result { + self.client.status(hash) + } + + fn number( + &self, + hash: Block::Hash, + ) -> sc_client_api::blockchain::Result::Header as HeaderT>::Number>> { + self.client.number(hash) + } + + fn hash( + &self, + number: NumberFor, + ) -> sc_client_api::blockchain::Result> { + self.client.hash(number) + } +} + +impl AuxStore for DefaultSubsystemClient +where + Client: AuxStore, +{ + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> sp_blockchain::Result<()> { + self.client.insert_aux(insert, delete) + } + + fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result>> { + self.client.get_aux(key) + } +} diff --git a/polkadot/node/test/service/Cargo.toml b/polkadot/node/test/service/Cargo.toml index e7892abcd87bf09140b53f5f123798b2ccce7ac4..02b227e6f345758d8b53b7da0b0cf3ddc2b77d05 100644 --- a/polkadot/node/test/service/Cargo.toml +++ b/polkadot/node/test/service/Cargo.toml @@ -71,6 +71,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-staking/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", "polkadot-runtime-common/runtime-benchmarks", diff --git a/polkadot/node/test/service/src/chain_spec.rs b/polkadot/node/test/service/src/chain_spec.rs index 4cc387317c3f0f15990825ee377e07ae82925073..f14fa9fde58b4426690cd1bd0e16129f095c34ec 100644 --- a/polkadot/node/test/service/src/chain_spec.rs +++ b/polkadot/node/test/service/src/chain_spec.rs @@ -19,7 +19,9 @@ use babe_primitives::AuthorityId as BabeId; use grandpa::AuthorityId as GrandpaId; use pallet_staking::Forcing; -use polkadot_primitives::{AccountId, AssignmentId, ValidatorId, MAX_CODE_SIZE, MAX_POV_SIZE}; +use polkadot_primitives::{ + vstaging::SchedulerParams, AccountId, AssignmentId, ValidatorId, MAX_CODE_SIZE, MAX_POV_SIZE, +}; use polkadot_service::chain_spec::{get_account_id_from_seed, get_from_seed, Extensions}; use polkadot_test_runtime::BABE_GENESIS_EPOCH_CONFIG; use sc_chain_spec::{ChainSpec, ChainType}; @@ -165,11 +167,14 @@ fn polkadot_testnet_genesis( max_code_size: MAX_CODE_SIZE, max_pov_size: MAX_POV_SIZE, max_head_data_size: 32 * 1024, - group_rotation_frequency: 20, - paras_availability_period: 4, no_show_slots: 10, minimum_validation_upgrade_delay: 5, max_downward_message_size: 1024, + scheduler_params: SchedulerParams { + group_rotation_frequency: 20, + paras_availability_period: 4, + ..Default::default() + }, ..Default::default() }, } diff --git a/polkadot/node/test/service/src/lib.rs b/polkadot/node/test/service/src/lib.rs index ca904bae28db2dcc2f0a2e50e1cf92528a135003..1876595c7b40b6b8a9311a606a8430e6fcd42a08 100644 --- a/polkadot/node/test/service/src/lib.rs +++ b/polkadot/node/test/service/src/lib.rs @@ -32,7 +32,7 @@ use polkadot_service::{ Error, FullClient, IsParachainNode, NewFull, OverseerGen, PrometheusConfig, }; use polkadot_test_runtime::{ - ParasCall, ParasSudoWrapperCall, Runtime, SignedExtra, SignedPayload, SudoCall, + 
ParasCall, ParasSudoWrapperCall, Runtime, SignedPayload, SudoCall, TxExtension, UncheckedExtrinsic, VERSION, }; @@ -382,7 +382,7 @@ pub fn construct_extrinsic( let period = BlockHashCount::get().checked_next_power_of_two().map(|c| c / 2).unwrap_or(2) as u64; let tip = 0; - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -391,10 +391,11 @@ pub fn construct_extrinsic( frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), - ); + ) + .into(); let raw_payload = SignedPayload::from_raw( function.clone(), - extra.clone(), + tx_ext.clone(), ( (), VERSION.spec_version, @@ -411,7 +412,7 @@ pub fn construct_extrinsic( function.clone(), polkadot_test_runtime::Address::Id(caller.public().into()), polkadot_primitives::Signature::Sr25519(signature.clone()), - extra.clone(), + tx_ext.clone(), ) } diff --git a/polkadot/primitives/src/v6/executor_params.rs b/polkadot/primitives/src/v6/executor_params.rs index 112a529f62b0570e9270d1f09e397e6b9dc358b0..1e19f3b23fec9b9889d5976380f52ab6b66072b4 100644 --- a/polkadot/primitives/src/v6/executor_params.rs +++ b/polkadot/primitives/src/v6/executor_params.rs @@ -159,7 +159,9 @@ impl sp_std::fmt::LowerHex for ExecutorParamsHash { // into individual fields of the structure. Thus, complex migrations shall be avoided when adding // new entries and removing old ones. At the moment, there's no mandatory parameters defined. If // they show up, they must be clearly documented as mandatory ones. -#[derive(Clone, Debug, Encode, Decode, PartialEq, Eq, TypeInfo, Serialize, Deserialize)] +#[derive( + Clone, Debug, Default, Encode, Decode, PartialEq, Eq, TypeInfo, Serialize, Deserialize, +)] pub struct ExecutorParams(Vec); impl ExecutorParams { @@ -334,9 +336,3 @@ impl From<&[ExecutorParam]> for ExecutorParams { ExecutorParams(arr.to_vec()) } } - -impl Default for ExecutorParams { - fn default() -> Self { - ExecutorParams(vec![]) - } -} diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index 39d9dfc02c5bfa01281559852a5beed850aea1a8..bd2a5106c44de5895fb327957161c84f13a983b0 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -23,6 +23,7 @@ use sp_std::prelude::*; use parity_scale_codec::{Decode, Encode}; use primitives::RuntimeDebug; use scale_info::TypeInfo; +use sp_arithmetic::Perbill; /// Approval voting configuration parameters #[derive( @@ -50,6 +51,81 @@ impl Default for ApprovalVotingParams { } } +/// Scheduler configuration parameters. All coretime/ondemand parameters are here. +#[derive( + RuntimeDebug, + Copy, + Clone, + PartialEq, + Encode, + Decode, + TypeInfo, + serde::Serialize, + serde::Deserialize, +)] +pub struct SchedulerParams { + /// How often parachain groups should be rotated across parachains. + /// + /// Must be non-zero. + pub group_rotation_frequency: BlockNumber, + /// Availability timeout for a block on a core, measured in blocks. + /// + /// This is the maximum amount of blocks after a core became occupied that validators have time + /// to make the block available. + /// + /// This value only has effect on group rotations. If backers backed something at the end of + /// their rotation, the occupied core affects the backing group that comes afterwards. 
We limit + /// the effect one backing group can have on the next to `paras_availability_period` blocks. + /// + /// Within a group rotation there is no timeout as backers are only affecting themselves. + /// + /// Must be at least 1. With a value of 1, the previous group will not be able to negatively + /// affect the following group at the expense of a tight availability timeline at group + /// rotation boundaries. + pub paras_availability_period: BlockNumber, + /// The maximum number of validators to have per core. + /// + /// `None` means no maximum. + pub max_validators_per_core: Option, + /// The amount of blocks ahead to schedule paras. + pub lookahead: u32, + /// How many cores are managed by the coretime chain. + pub num_cores: u32, + /// The max number of times a claim can time out in availability. + pub max_availability_timeouts: u32, + /// The maximum queue size of the pay as you go module. + pub on_demand_queue_max_size: u32, + /// The target utilization of the spot price queue in percentages. + pub on_demand_target_queue_utilization: Perbill, + /// How quickly the fee rises in reaction to increased utilization. + /// The lower the number the slower the increase. + pub on_demand_fee_variability: Perbill, + /// The minimum amount needed to claim a slot in the spot pricing queue. + pub on_demand_base_fee: Balance, + /// The number of blocks a claim stays in the scheduler's claimqueue before getting cleared. + /// This number should go reasonably higher than the number of blocks in the async backing + /// lookahead. + pub ttl: BlockNumber, +} + +impl> Default for SchedulerParams { + fn default() -> Self { + Self { + group_rotation_frequency: 1u32.into(), + paras_availability_period: 1u32.into(), + max_validators_per_core: Default::default(), + lookahead: 1, + num_cores: Default::default(), + max_availability_timeouts: Default::default(), + on_demand_queue_max_size: ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, + on_demand_target_queue_utilization: Perbill::from_percent(25), + on_demand_fee_variability: Perbill::from_percent(3), + on_demand_base_fee: 10_000_000u128, + ttl: 5u32.into(), + } + } +} + use bitvec::vec::BitVec; /// Bit indices in the `HostConfiguration.node_features` that correspond to different node features. diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md index e0984bd58d1dd8ea22b145a15b4a3bab8779de80..39a317a07c847aa734e78f85de524f5541c8ddf6 100644 --- a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md +++ b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md @@ -125,6 +125,14 @@ execution request: reason, which may or may not be independent of the candidate or PVF. 5. **Internal errors:** See "Internal Errors" section. In this case, after the retry we abstain from voting. +6. **RuntimeConstruction** error. The precheck handles a general case of a wrong + artifact but doesn't guarantee its consistency between the preparation and + the execution. If something happened with the artifact between + the preparation of the artifact and its execution (e.g. the artifact was + corrupted on disk or a dirty node upgrade happened when the prepare worker + has a wasmtime version different from the execute worker's wasmtime version). + We treat such an error as possibly transient due to local issues and retry + one time. 
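To make the retry policy described in item 6 concrete, here is a deliberately simplified sketch with hypothetical names (not the actual PVF host types): a `RuntimeConstruction`-style failure is treated as possibly transient and retried exactly once, while other errors are returned as-is.

```rust
// Hypothetical stand-ins for illustration only.
#[allow(dead_code)]
#[derive(Debug)]
enum ExecError {
    RuntimeConstruction(String),
    InvalidCandidate,
}

fn possibly_transient(err: &ExecError) -> bool {
    matches!(err, ExecError::RuntimeConstruction(_))
}

/// Run an execution attempt, retrying exactly once on possibly-transient errors.
fn execute_with_retry<T>(
    mut attempt: impl FnMut() -> Result<T, ExecError>,
) -> Result<T, ExecError> {
    match attempt() {
        Err(ref e) if possibly_transient(e) => attempt(),
        other => other,
    }
}

fn main() {
    let mut calls = 0;
    let result: Result<(), _> = execute_with_retry(|| {
        calls += 1;
        if calls == 1 {
            Err(ExecError::RuntimeConstruction("corrupted artifact".into()))
        } else {
            Ok(())
        }
    });
    assert!(result.is_ok());
    assert_eq!(calls, 2);
}
```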
### Preparation timeouts diff --git a/polkadot/roadmap/implementers-guide/src/types/runtime.md b/polkadot/roadmap/implementers-guide/src/types/runtime.md index 4b97409f8df3e4f3581161ef47713bfdfbcb9654..1dfabd96db7cdb929c416201eeeb233af11f5dbb 100644 --- a/polkadot/roadmap/implementers-guide/src/types/runtime.md +++ b/polkadot/roadmap/implementers-guide/src/types/runtime.md @@ -4,106 +4,24 @@ Types used within the runtime exclusively and pervasively. ## Host Configuration -The internal-to-runtime configuration of the parachain host. This is expected to be altered only by governance procedures. - -```rust -struct HostConfiguration { - /// The minimum period, in blocks, between which parachains can update their validation code. - pub validation_upgrade_cooldown: BlockNumber, - /// The delay, in blocks, before a validation upgrade is applied. - pub validation_upgrade_delay: BlockNumber, - /// How long to keep code on-chain, in blocks. This should be sufficiently long that disputes - /// have concluded. - pub code_retention_period: BlockNumber, - /// The maximum validation code size, in bytes. - pub max_code_size: u32, - /// The maximum head-data size, in bytes. - pub max_head_data_size: u32, - /// The amount of availability cores to dedicate to parathreads (on-demand parachains). - pub parathread_cores: u32, - /// The number of retries that a parathread (on-demand parachain) author has to submit their block. - pub parathread_retries: u32, - /// How often parachain groups should be rotated across parachains. - pub group_rotation_frequency: BlockNumber, - /// The availability period, in blocks, for parachains. This is the amount of blocks - /// after inclusion that validators have to make the block available and signal its availability to - /// the chain. Must be at least 1. - pub chain_availability_period: BlockNumber, - /// The availability period, in blocks, for parathreads (on-demand parachains). Same as the `chain_availability_period`, - /// but a differing timeout due to differing requirements. Must be at least 1. - pub thread_availability_period: BlockNumber, - /// The amount of blocks ahead to schedule on-demand parachains. - pub scheduling_lookahead: u32, - /// The maximum number of validators to have per core. `None` means no maximum. - pub max_validators_per_core: Option, - /// The maximum number of validators to use for parachains, in total. `None` means no maximum. - pub max_validators: Option, - /// The amount of sessions to keep for disputes. - pub dispute_period: SessionIndex, - /// How long after dispute conclusion to accept statements. - pub dispute_post_conclusion_acceptance_period: BlockNumber, - /// The maximum number of dispute spam slots - pub dispute_max_spam_slots: u32, - /// The amount of consensus slots that must pass between submitting an assignment and - /// submitting an approval vote before a validator is considered a no-show. - /// Must be at least 1. - pub no_show_slots: u32, - /// The number of delay tranches in total. - pub n_delay_tranches: u32, - /// The width of the zeroth delay tranche for approval assignments. This many delay tranches - /// beyond 0 are all consolidated to form a wide 0 tranche. - pub zeroth_delay_tranche_width: u32, - /// The number of validators needed to approve a block. - pub needed_approvals: u32, - /// The number of samples to use in `RelayVRFModulo` or `RelayVRFModuloCompact` approval assignment criterions. 
- pub relay_vrf_modulo_samples: u32, - /// Total number of individual messages allowed in the parachain -> relay-chain message queue. - pub max_upward_queue_count: u32, - /// Total size of messages allowed in the parachain -> relay-chain message queue before which - /// no further messages may be added to it. If it exceeds this then the queue may contain only - /// a single message. - pub max_upward_queue_size: u32, - /// The maximum size of an upward message that can be sent by a candidate. - /// - /// This parameter affects the upper bound of size of `CandidateCommitments`. - pub max_upward_message_size: u32, - /// The maximum number of messages that a candidate can contain. - /// - /// This parameter affects the upper bound of size of `CandidateCommitments`. - pub max_upward_message_num_per_candidate: u32, - /// The maximum size of a message that can be put in a downward message queue. - /// - /// Since we require receiving at least one DMP message the obvious upper bound of the size is - /// the PoV size. Of course, there is a lot of other different things that a parachain may - /// decide to do with its PoV so this value in practice will be picked as a fraction of the PoV - /// size. - pub max_downward_message_size: u32, - /// The deposit that the sender should provide for opening an HRMP channel. - pub hrmp_sender_deposit: u32, - /// The deposit that the recipient should provide for accepting opening an HRMP channel. - pub hrmp_recipient_deposit: u32, - /// The maximum number of messages allowed in an HRMP channel at once. - pub hrmp_channel_max_capacity: u32, - /// The maximum total size of messages in bytes allowed in an HRMP channel at once. - pub hrmp_channel_max_total_size: u32, - /// The maximum number of inbound HRMP channels a parachain is allowed to accept. - pub hrmp_max_parachain_inbound_channels: u32, - /// The maximum number of inbound HRMP channels a parathread (on-demand parachain) is allowed to accept. - pub hrmp_max_parathread_inbound_channels: u32, - /// The maximum size of a message that could ever be put into an HRMP channel. - /// - /// This parameter affects the upper bound of size of `CandidateCommitments`. - pub hrmp_channel_max_message_size: u32, - /// The maximum number of outbound HRMP channels a parachain is allowed to open. - pub hrmp_max_parachain_outbound_channels: u32, - /// The maximum number of outbound HRMP channels a parathread (on-demand parachain) is allowed to open. - pub hrmp_max_parathread_outbound_channels: u32, - /// The maximum number of outbound HRMP messages can be sent by a candidate. - /// - /// This parameter affects the upper bound of size of `CandidateCommitments`. - pub hrmp_max_message_num_per_candidate: u32, -} -``` +The internal-to-runtime configuration of the parachain host is kept in `struct HostConfiguration`. This is expected to +be altered only by governance procedures or via migrations from the Polkadot-SDK codebase. The latest definition of +`HostConfiguration` can be found in the project repo +[here](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/runtime/parachains/src/configuration.rs). Each +parameter has got a doc comment so for any details please refer to the code. + +Some related parameters in `HostConfiguration` are grouped together so that they can be managed easily. 
These are: +* `async_backing_params` in `struct AsyncBackingParams` +* `executor_params` in `struct ExecutorParams` +* `approval_voting_params` in `struct ApprovalVotingParams` +* `scheduler_params` in `struct SchedulerParams` + +Check the definitions of these structs for further details. + +### Configuration migrations +Modifying `HostConfiguration` requires a storage migration. These migrations are located in the +[`migrations`](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/runtime/parachains/src/configuration.rs) +subfolder of Polkadot-SDK repo. ## ParaInherentData diff --git a/polkadot/runtime/common/Cargo.toml b/polkadot/runtime/common/Cargo.toml index eae5d4fb2ef90a0acb515b863dfb0013c45bac89..c2205938295e38f8e683a211a09b6e3bce777e38 100644 --- a/polkadot/runtime/common/Cargo.toml +++ b/polkadot/runtime/common/Cargo.toml @@ -132,6 +132,7 @@ runtime-benchmarks = [ "pallet-identity/runtime-benchmarks", "pallet-staking/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", "primitives/runtime-benchmarks", diff --git a/polkadot/runtime/common/src/assigned_slots/migration.rs b/polkadot/runtime/common/src/assigned_slots/migration.rs index ba3108c0aa38b9b9a2dabba0559d74ec3c97d0ed..def6bad692a235d3282774b0db3fabdaf33c89b3 100644 --- a/polkadot/runtime/common/src/assigned_slots/migration.rs +++ b/polkadot/runtime/common/src/assigned_slots/migration.rs @@ -29,14 +29,14 @@ pub mod v1 { impl OnRuntimeUpgrade for VersionUncheckedMigrateToV1 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { - let onchain_version = Pallet::::on_chain_storage_version(); - ensure!(onchain_version < 1, "assigned_slots::MigrateToV1 migration can be deleted"); + let on_chain_version = Pallet::::on_chain_storage_version(); + ensure!(on_chain_version < 1, "assigned_slots::MigrateToV1 migration can be deleted"); Ok(Default::default()) } fn on_runtime_upgrade() -> frame_support::weights::Weight { - let onchain_version = Pallet::::on_chain_storage_version(); - if onchain_version < 1 { + let on_chain_version = Pallet::::on_chain_storage_version(); + if on_chain_version < 1 { const MAX_PERMANENT_SLOTS: u32 = 100; const MAX_TEMPORARY_SLOTS: u32 = 100; @@ -52,8 +52,8 @@ pub mod v1 { #[cfg(feature = "try-runtime")] fn post_upgrade(_state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { - let onchain_version = Pallet::::on_chain_storage_version(); - ensure!(onchain_version == 1, "assigned_slots::MigrateToV1 needs to be run"); + let on_chain_version = Pallet::::on_chain_storage_version(); + ensure!(on_chain_version == 1, "assigned_slots::MigrateToV1 needs to be run"); assert_eq!(>::get(), 100); assert_eq!(>::get(), 100); Ok(()) diff --git a/polkadot/runtime/common/src/assigned_slots/mod.rs b/polkadot/runtime/common/src/assigned_slots/mod.rs index 3419e3497f7d446f7bea02bf8134932c0a0cdbf6..4f032f4dfa3bd76b2e3ced899cec3a0bb5b06ffe 100644 --- a/polkadot/runtime/common/src/assigned_slots/mod.rs +++ b/polkadot/runtime/common/src/assigned_slots/mod.rs @@ -107,7 +107,7 @@ type LeasePeriodOf = <::Leaser as Leaser>>::Le pub mod pallet { use super::*; - /// The current storage version. + /// The in-code storage version. 
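The `assigned_slots` migration above follows the usual version-gated pattern, and the comment rename reflects that `STORAGE_VERSION` is the in-code version that gets compared against the on-chain one. A simplified, non-FRAME sketch of that pattern with stand-in types:

```rust
/// Stand-in for the pallet's storage; real migrations use FRAME storage items.
struct MockStorage {
    on_chain_version: u16,
    max_permanent_slots: u32,
}

/// The in-code storage version this migration targets.
const IN_CODE_VERSION: u16 = 1;

fn on_runtime_upgrade(storage: &mut MockStorage) {
    // Only migrate if the on-chain version is behind the in-code version.
    if storage.on_chain_version < IN_CODE_VERSION {
        // Write the new values introduced by the migration ...
        storage.max_permanent_slots = 100;
        // ... and record that the chain is now on the in-code version.
        storage.on_chain_version = IN_CODE_VERSION;
    }
    // Running the migration again is a no-op once the versions match.
}

fn main() {
    let mut storage = MockStorage { on_chain_version: 0, max_permanent_slots: 0 };
    on_runtime_upgrade(&mut storage);
    on_runtime_upgrade(&mut storage); // idempotent
    assert_eq!((storage.on_chain_version, storage.max_permanent_slots), (1, 100));
}
```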
const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/polkadot/runtime/common/src/claims.rs b/polkadot/runtime/common/src/claims.rs index 86550ea8b4ebf97054ce6713fffcdd0dd2cf628e..57a95b22af3801dc22ba19cdee61b26c1cfe6f78 100644 --- a/polkadot/runtime/common/src/claims.rs +++ b/polkadot/runtime/common/src/claims.rs @@ -29,7 +29,11 @@ use scale_info::TypeInfo; use serde::{self, Deserialize, Deserializer, Serialize, Serializer}; use sp_io::{crypto::secp256k1_ecdsa_recover, hashing::keccak_256}; use sp_runtime::{ - traits::{CheckedSub, DispatchInfoOf, SignedExtension, Zero}, + impl_tx_ext_default, + traits::{ + AsSystemOriginSigner, CheckedSub, DispatchInfoOf, Dispatchable, TransactionExtension, + TransactionExtensionBase, Zero, + }, transaction_validity::{ InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, }, @@ -50,6 +54,7 @@ pub trait WeightInfo { fn claim_attest() -> Weight; fn attest() -> Weight; fn move_claim() -> Weight; + fn prevalidate_attests() -> Weight; } pub struct TestWeightInfo; @@ -69,6 +74,9 @@ impl WeightInfo for TestWeightInfo { fn move_claim() -> Weight { Weight::zero() } + fn prevalidate_attests() -> Weight { + Weight::zero() + } } /// The kind of statement an account needs to make for a claim to be valid. @@ -84,7 +92,7 @@ pub enum StatementKind { impl StatementKind { /// Convert this to the (English) statement it represents. - fn to_text(self) -> &'static [u8] { + pub fn to_text(self) -> &'static [u8] { match self { StatementKind::Regular => &b"I hereby agree to the terms of the statement whose SHA-256 multihash is \ @@ -166,6 +174,13 @@ pub mod pallet { #[pallet::without_storage_info] pub struct Pallet(_); + #[cfg(feature = "runtime-benchmarks")] + /// Helper trait to benchmark the `PrevalidateAttests` transaction extension. + pub trait BenchmarkHelperTrait { + /// `Call` to be used when benchmarking the transaction extension. + fn default_call_and_info() -> (RuntimeCall, DispatchInfo); + } + /// Configuration trait. #[pallet::config] pub trait Config: frame_system::Config { @@ -176,6 +191,12 @@ pub mod pallet { type Prefix: Get<&'static [u8]>; type MoveClaimOrigin: EnsureOrigin; type WeightInfo: WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + /// Benchmark helper + type BenchmarkHelper: BenchmarkHelperTrait< + Self::RuntimeCall, + DispatchInfoOf, + >; } #[pallet::event] @@ -402,7 +423,7 @@ pub mod pallet { /// Attest to a statement, needed to finalize the claims process. /// /// WARNING: Insecure unless your chain includes `PrevalidateAttests` as a - /// `SignedExtension`. + /// `TransactionExtension`. /// /// Unsigned Validation: /// A call to attest is deemed valid if the sender has a `Preclaim` registered @@ -612,46 +633,46 @@ impl PrevalidateAttests where ::RuntimeCall: IsSubType>, { - /// Create new `SignedExtension` to check runtime version. + /// Create new `TransactionExtension` to check runtime version. 
pub fn new() -> Self { Self(sp_std::marker::PhantomData) } } -impl SignedExtension for PrevalidateAttests +impl TransactionExtensionBase for PrevalidateAttests where ::RuntimeCall: IsSubType>, { - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = (); - type Pre = (); const IDENTIFIER: &'static str = "PrevalidateAttests"; + type Implicit = (); +} - fn additional_signed(&self) -> Result { - Ok(()) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) - } +impl TransactionExtension for PrevalidateAttests +where + ::RuntimeCall: IsSubType>, + <::RuntimeCall as Dispatchable>::RuntimeOrigin: + AsSystemOriginSigner + Clone, +{ + type Pre = (); + type Val = (); // // The weight of this logic is included in the `attest` dispatchable. // fn validate( &self, - who: &Self::AccountId, - call: &Self::Call, - _info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + call: &T::RuntimeCall, + _info: &DispatchInfoOf, _len: usize, - ) -> TransactionValidity { + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> Result< + (ValidTransaction, Self::Val, ::RuntimeOrigin), + TransactionValidityError, + > { + let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; if let Some(local_call) = call.is_sub_type() { if let Call::attest { statement: attested_statement } = local_call { let signer = Preclaims::::get(who) @@ -662,8 +683,9 @@ where } } } - Ok(ValidTransaction::default()) + Ok((ValidTransaction::default(), (), origin)) } + impl_tx_ext_default!(T::RuntimeCall; Context; prepare); } #[cfg(any(test, feature = "runtime-benchmarks"))] @@ -696,13 +718,12 @@ mod secp_utils { } #[cfg(test)] -mod tests { +pub(super) mod tests { use super::*; use hex_literal::hex; use secp_utils::*; use parity_scale_codec::Encode; - use sp_core::H256; // The testing primitives are very useful for avoiding having to work with signatures // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. use crate::claims; @@ -715,7 +736,7 @@ mod tests { }; use pallet_balances; use sp_runtime::{ - traits::{BlakeTwo256, Identity, IdentityLookup}, + traits::{DispatchTransaction, Identity}, transaction_validity::TransactionLongevity, BuildStorage, DispatchError::BadOrigin, @@ -734,34 +755,13 @@ mod tests { } ); - parameter_types! 
{ - pub const BlockHashCount: u32 = 250; - } - #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Nonce = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); type MaxConsumers = frame_support::traits::ConstU32<16>; } @@ -815,6 +815,19 @@ mod tests { type Prefix = Prefix; type MoveClaimOrigin = frame_system::EnsureSignedBy; type WeightInfo = TestWeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); + } + + #[cfg(feature = "runtime-benchmarks")] + impl BenchmarkHelperTrait> for () { + fn default_call_and_info() -> (RuntimeCall, DispatchInfoOf) { + let call = RuntimeCall::Claims(crate::claims::Call::attest { + statement: StatementKind::Regular.to_text().to_vec(), + }); + let info = call.get_dispatch_info(); + (call, info) + } } fn alice() -> libsecp256k1::SecretKey { @@ -1096,8 +1109,8 @@ mod tests { }); let di = c.get_dispatch_info(); assert_eq!(di.pays_fee, Pays::No); - let r = p.validate(&42, &c, &di, 20); - assert_eq!(r, TransactionValidity::Ok(ValidTransaction::default())); + let r = p.validate_only(Some(42).into(), &c, &di, 20); + assert_eq!(r.unwrap().0, ValidTransaction::default()); }); } @@ -1109,13 +1122,13 @@ mod tests { statement: StatementKind::Regular.to_text().to_vec(), }); let di = c.get_dispatch_info(); - let r = p.validate(&42, &c, &di, 20); + let r = p.validate_only(Some(42).into(), &c, &di, 20); assert!(r.is_err()); let c = RuntimeCall::Claims(ClaimsCall::attest { statement: StatementKind::Saft.to_text().to_vec(), }); let di = c.get_dispatch_info(); - let r = p.validate(&69, &c, &di, 20); + let r = p.validate_only(Some(69).into(), &c, &di, 20); assert!(r.is_err()); }); } @@ -1469,14 +1482,17 @@ mod tests { } #[cfg(feature = "runtime-benchmarks")] -mod benchmarking { +pub(super) mod benchmarking { use super::*; use crate::claims::Call; use frame_benchmarking::{account, benchmarks}; use frame_support::traits::UnfilteredDispatchable; use frame_system::RawOrigin; use secp_utils::*; - use sp_runtime::{traits::ValidateUnsigned, DispatchResult}; + use sp_runtime::{ + traits::{DispatchTransaction, ValidateUnsigned}, + DispatchResult, + }; const SEED: u32 = 0; @@ -1512,6 +1528,11 @@ mod benchmarking { } benchmarks! { + where_clause { where ::RuntimeCall: IsSubType>, + <::RuntimeCall as Dispatchable>::RuntimeOrigin: AsSystemOriginSigner + Clone, + <::RuntimeCall as Dispatchable>::PostInfo: Default, + } + // Benchmark `claim` including `validate_unsigned` logic. claim { let c = MAX_CLAIMS; @@ -1690,6 +1711,38 @@ mod benchmarking { } } + prevalidate_attests { + let c = MAX_CLAIMS; + + for i in 0 .. 
c / 2 { + create_claim::(c)?; + create_claim_attest::(u32::MAX - c)?; + } + + let ext = PrevalidateAttests::::new(); + let (call, info) = T::BenchmarkHelper::default_call_and_info(); + let attest_c = u32::MAX - c; + let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&attest_c.encode())).unwrap(); + let eth_address = eth(&secret_key); + let account: T::AccountId = account("user", c, SEED); + let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); + let statement = StatementKind::Regular; + let signature = sig::(&secret_key, &account.encode(), statement.to_text()); + super::Pallet::::mint_claim(RawOrigin::Root.into(), eth_address, VALUE.into(), vesting, Some(statement))?; + Preclaims::::insert(&account, eth_address); + assert_eq!(Claims::::get(eth_address), Some(VALUE.into())); + }: { + assert!(ext.test_run( + RawOrigin::Signed(account).into(), + &call, + &info, + 0, + |_| { + Ok(Default::default()) + } + ).unwrap().is_ok()); + } + impl_benchmark_test_suite!( Pallet, crate::claims::tests::new_test_ext(), diff --git a/polkadot/runtime/common/src/crowdloan/migration.rs b/polkadot/runtime/common/src/crowdloan/migration.rs index 5133c14ada9276a56ae91f38e8a67b4846979866..3afd6b3fbc94b5b30bad162cfcf8236a029b77da 100644 --- a/polkadot/runtime/common/src/crowdloan/migration.rs +++ b/polkadot/runtime/common/src/crowdloan/migration.rs @@ -24,9 +24,9 @@ use frame_support::{ pub struct MigrateToTrackInactiveV2(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToTrackInactiveV2 { fn on_runtime_upgrade() -> Weight { - let onchain_version = Pallet::::on_chain_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); - if onchain_version == 1 { + if on_chain_version == 1 { let mut translated = 0u64; for item in Funds::::iter_values() { let b = diff --git a/polkadot/runtime/common/src/crowdloan/mod.rs b/polkadot/runtime/common/src/crowdloan/mod.rs index 7d1b892dfa7fc6c4057a7da8731e39a9a4b2d3e8..35d075f2ff6c2f5ee3c1fb4a159eed346d6c6781 100644 --- a/polkadot/runtime/common/src/crowdloan/mod.rs +++ b/polkadot/runtime/common/src/crowdloan/mod.rs @@ -180,7 +180,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); #[pallet::pallet] diff --git a/polkadot/runtime/common/src/paras_registrar/mod.rs b/polkadot/runtime/common/src/paras_registrar/mod.rs index 5450c4309e40745f36ae405a8614fe9e8ad2f534..4541b88ec6fe187988a949cda1338de59f258541 100644 --- a/polkadot/runtime/common/src/paras_registrar/mod.rs +++ b/polkadot/runtime/common/src/paras_registrar/mod.rs @@ -106,7 +106,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. 
const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/polkadot/runtime/parachains/src/assigner_coretime/mock_helpers.rs b/polkadot/runtime/parachains/src/assigner_coretime/mock_helpers.rs index 71c3f1fa39f7c6c127ac954b8299fb16a323e72d..e2ba0b4f7ea5d5ca2eb49c903afb764229e0491f 100644 --- a/polkadot/runtime/parachains/src/assigner_coretime/mock_helpers.rs +++ b/polkadot/runtime/parachains/src/assigner_coretime/mock_helpers.rs @@ -64,11 +64,12 @@ impl GenesisConfigBuilder { pub(super) fn build(self) -> MockGenesisConfig { let mut genesis = default_genesis_config(); let config = &mut genesis.configuration.config; - config.coretime_cores = self.on_demand_cores; - config.on_demand_base_fee = self.on_demand_base_fee; - config.on_demand_fee_variability = self.on_demand_fee_variability; - config.on_demand_queue_max_size = self.on_demand_max_queue_size; - config.on_demand_target_queue_utilization = self.on_demand_target_queue_utilization; + config.scheduler_params.num_cores = self.on_demand_cores; + config.scheduler_params.on_demand_base_fee = self.on_demand_base_fee; + config.scheduler_params.on_demand_fee_variability = self.on_demand_fee_variability; + config.scheduler_params.on_demand_queue_max_size = self.on_demand_max_queue_size; + config.scheduler_params.on_demand_target_queue_utilization = + self.on_demand_target_queue_utilization; let paras = &mut genesis.paras.paras; for para_id in self.onboarded_on_demand_chains { diff --git a/polkadot/runtime/parachains/src/assigner_coretime/mod.rs b/polkadot/runtime/parachains/src/assigner_coretime/mod.rs index 9da81dc816cabeb7019e44b8f88c0f526582830d..e2da89fadd4800f5de32650178766e1ed85bbd17 100644 --- a/polkadot/runtime/parachains/src/assigner_coretime/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_coretime/mod.rs @@ -30,7 +30,7 @@ mod tests; use crate::{ assigner_on_demand, configuration, paras::AssignCoretime, - scheduler::common::{Assignment, AssignmentProvider, AssignmentProviderConfig}, + scheduler::common::{Assignment, AssignmentProvider}, ParaId, }; @@ -316,14 +316,6 @@ impl AssignmentProvider> for Pallet { } } - fn get_provider_config(_core_idx: CoreIndex) -> AssignmentProviderConfig> { - let config = >::config(); - AssignmentProviderConfig { - max_availability_timeouts: config.on_demand_retries, - ttl: config.on_demand_ttl, - } - } - #[cfg(any(feature = "runtime-benchmarks", test))] fn get_mock_assignment(_: CoreIndex, para_id: primitives::Id) -> Assignment { // Given that we are not tracking anything in `Bulk` assignments, it is safe to always @@ -333,7 +325,7 @@ impl AssignmentProvider> for Pallet { fn session_core_count() -> u32 { let config = >::config(); - config.coretime_cores + config.scheduler_params.num_cores } } @@ -482,8 +474,8 @@ impl AssignCoretime for Pallet { // Add a new core and assign the para to it. let mut config = >::config(); - let core = config.coretime_cores; - config.coretime_cores.saturating_inc(); + let core = config.scheduler_params.num_cores; + config.scheduler_params.num_cores.saturating_inc(); // `assign_coretime` is only called at genesis or by root, so setting the active // config here is fine. 
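As a side note, `AssignCoretime::assign_coretime` above grows the core count in place: the new core index is the current `scheduler_params.num_cores`, which is then incremented so the next assignment lands on a fresh core. A standalone sketch of that pattern with stand-in types:

```rust
/// Stand-in for the relocated scheduler configuration.
#[derive(Default)]
struct SchedulerParams {
    num_cores: u32,
}

/// Returns the index of the newly appended core and bumps the count.
fn assign_new_core(params: &mut SchedulerParams) -> u32 {
    let core = params.num_cores;
    params.num_cores += 1; // the runtime uses `saturating_inc()` here
    core
}

fn main() {
    let mut params = SchedulerParams::default();
    assert_eq!(assign_new_core(&mut params), 0);
    assert_eq!(assign_new_core(&mut params), 1);
    assert_eq!(params.num_cores, 2);
}
```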
diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs b/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs index 5a6060cd2b4eab88867088dee30b6fb1047bdf20..8360e7a78d0a5a155f5b9d63ffae05bb85e44f32 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs @@ -43,7 +43,7 @@ where { ParasShared::::set_session_index(SESSION_INDEX); let mut config = HostConfiguration::default(); - config.coretime_cores = 1; + config.scheduler_params.num_cores = 1; ConfigurationPallet::::force_set_active_config(config); let mut parachains = ParachainsCache::new(); ParasPallet::::initialize_para_now( diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mock_helpers.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mock_helpers.rs index de30330ac84e0a7715799d71d26fb42ce48efff8..f8d1a894f0e4d5c619377ba4c34158c8f6d60597 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mock_helpers.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mock_helpers.rs @@ -63,11 +63,12 @@ impl GenesisConfigBuilder { pub(super) fn build(self) -> MockGenesisConfig { let mut genesis = default_genesis_config(); let config = &mut genesis.configuration.config; - config.coretime_cores = self.on_demand_cores; - config.on_demand_base_fee = self.on_demand_base_fee; - config.on_demand_fee_variability = self.on_demand_fee_variability; - config.on_demand_queue_max_size = self.on_demand_max_queue_size; - config.on_demand_target_queue_utilization = self.on_demand_target_queue_utilization; + config.scheduler_params.num_cores = self.on_demand_cores; + config.scheduler_params.on_demand_base_fee = self.on_demand_base_fee; + config.scheduler_params.on_demand_fee_variability = self.on_demand_fee_variability; + config.scheduler_params.on_demand_queue_max_size = self.on_demand_max_queue_size; + config.scheduler_params.on_demand_target_queue_utilization = + self.on_demand_target_queue_utilization; let paras = &mut genesis.paras.paras; for para_id in self.onboarded_on_demand_chains { diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs index 1b746e88694c9f105db119d351a76e336fd3fdba..bc450dc78129ad7404627e49b9882e2723c09f08 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs @@ -201,10 +201,10 @@ pub mod pallet { let old_traffic = SpotTraffic::::get(); match Self::calculate_spot_traffic( old_traffic, - config.on_demand_queue_max_size, + config.scheduler_params.on_demand_queue_max_size, Self::queue_size(), - config.on_demand_target_queue_utilization, - config.on_demand_fee_variability, + config.scheduler_params.on_demand_target_queue_utilization, + config.scheduler_params.on_demand_fee_variability, ) { Ok(new_traffic) => { // Only update storage on change @@ -330,8 +330,9 @@ where let traffic = SpotTraffic::::get(); // Calculate spot price - let spot_price: BalanceOf = - traffic.saturating_mul_int(config.on_demand_base_fee.saturated_into::>()); + let spot_price: BalanceOf = traffic.saturating_mul_int( + config.scheduler_params.on_demand_base_fee.saturated_into::>(), + ); // Is the current price higher than `max_amount` ensure!(spot_price.le(&max_amount), Error::::SpotPriceHigherThanMaxAmount); @@ -450,7 +451,10 @@ where OnDemandQueue::::try_mutate(|queue| { // Abort transaction if queue is too large - ensure!(Self::queue_size() < 
config.on_demand_queue_max_size, Error::::QueueFull); + ensure!( + Self::queue_size() < config.scheduler_params.on_demand_queue_max_size, + Error::::QueueFull + ); match location { QueuePushDirection::Back => queue.push_back(order), QueuePushDirection::Front => queue.push_front(order), @@ -472,7 +476,7 @@ where target: LOG_TARGET, "Failed to fetch the on demand queue size, returning the max size." ); - return config.on_demand_queue_max_size + return config.scheduler_params.on_demand_queue_max_size }, } } diff --git a/polkadot/runtime/parachains/src/assigner_parachains.rs b/polkadot/runtime/parachains/src/assigner_parachains.rs index 34b5d3c1ec51811e8e4c50255592b1e9344014d8..b5f342563e9763e9dba0fa8cbc7afd8e84dc59da 100644 --- a/polkadot/runtime/parachains/src/assigner_parachains.rs +++ b/polkadot/runtime/parachains/src/assigner_parachains.rs @@ -27,7 +27,7 @@ use primitives::CoreIndex; use crate::{ configuration, paras, - scheduler::common::{Assignment, AssignmentProvider, AssignmentProviderConfig}, + scheduler::common::{Assignment, AssignmentProvider}, }; pub use pallet::*; @@ -58,16 +58,6 @@ impl AssignmentProvider> for Pallet { /// this is a no-op in the case of a bulk assignment slot. fn push_back_assignment(_: Assignment) {} - fn get_provider_config(_core_idx: CoreIndex) -> AssignmentProviderConfig> { - AssignmentProviderConfig { - // The next assignment already goes to the same [`ParaId`], no timeout tracking needed. - max_availability_timeouts: 0, - // The next assignment already goes to the same [`ParaId`], this can be any number - // that's high enough to clear the time it takes to clear backing/availability. - ttl: 10u32.into(), - } - } - #[cfg(any(feature = "runtime-benchmarks", test))] fn get_mock_assignment(_: CoreIndex, para_id: primitives::Id) -> Assignment { Assignment::Bulk(para_id) diff --git a/polkadot/runtime/parachains/src/assigner_parachains/mock_helpers.rs b/polkadot/runtime/parachains/src/assigner_parachains/mock_helpers.rs index e6e9fb074aa97e87e6fe92819cc57e5bfa2ca656..a46e114daeaf458fb6bc985a5bb2ebad951d7e3b 100644 --- a/polkadot/runtime/parachains/src/assigner_parachains/mock_helpers.rs +++ b/polkadot/runtime/parachains/src/assigner_parachains/mock_helpers.rs @@ -60,11 +60,12 @@ impl GenesisConfigBuilder { pub(super) fn build(self) -> MockGenesisConfig { let mut genesis = default_genesis_config(); let config = &mut genesis.configuration.config; - config.coretime_cores = self.on_demand_cores; - config.on_demand_base_fee = self.on_demand_base_fee; - config.on_demand_fee_variability = self.on_demand_fee_variability; - config.on_demand_queue_max_size = self.on_demand_max_queue_size; - config.on_demand_target_queue_utilization = self.on_demand_target_queue_utilization; + config.scheduler_params.num_cores = self.on_demand_cores; + config.scheduler_params.on_demand_base_fee = self.on_demand_base_fee; + config.scheduler_params.on_demand_fee_variability = self.on_demand_fee_variability; + config.scheduler_params.on_demand_queue_max_size = self.on_demand_max_queue_size; + config.scheduler_params.on_demand_target_queue_utilization = + self.on_demand_target_queue_utilization; let paras = &mut genesis.paras.paras; for para_id in self.onboarded_on_demand_chains { diff --git a/polkadot/runtime/parachains/src/builder.rs b/polkadot/runtime/parachains/src/builder.rs index 500bc70cfa75294e5d08a590a6a90f3eb8b3acf1..5b218addb1a2552d3f9daa3dbb7baf352023f919 100644 --- a/polkadot/runtime/parachains/src/builder.rs +++ b/polkadot/runtime/parachains/src/builder.rs @@ -18,23 +18,20 @@ use 
crate::{ configuration, inclusion, initializer, paras, paras::ParaKind, paras_inherent, - scheduler::{ - self, - common::{AssignmentProvider, AssignmentProviderConfig}, - CoreOccupied, ParasEntry, - }, + scheduler::{self, common::AssignmentProvider, CoreOccupied, ParasEntry}, session_info, shared, }; use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; use primitives::{ - collator_signature_payload, AvailabilityBitfield, BackedCandidate, CandidateCommitments, - CandidateDescriptor, CandidateHash, CollatorId, CollatorSignature, CommittedCandidateReceipt, - CompactStatement, CoreIndex, DisputeStatement, DisputeStatementSet, GroupIndex, HeadData, - Id as ParaId, IndexedVec, InherentData as ParachainsInherentData, InvalidDisputeStatementKind, - PersistedValidationData, SessionIndex, SigningContext, UncheckedSigned, - ValidDisputeStatementKind, ValidationCode, ValidatorId, ValidatorIndex, ValidityAttestation, + collator_signature_payload, vstaging::node_features::FeatureIndex, AvailabilityBitfield, + BackedCandidate, CandidateCommitments, CandidateDescriptor, CandidateHash, CollatorId, + CollatorSignature, CommittedCandidateReceipt, CompactStatement, CoreIndex, DisputeStatement, + DisputeStatementSet, GroupIndex, HeadData, Id as ParaId, IndexedVec, + InherentData as ParachainsInherentData, InvalidDisputeStatementKind, PersistedValidationData, + SessionIndex, SigningContext, UncheckedSigned, ValidDisputeStatementKind, ValidationCode, + ValidatorId, ValidatorIndex, ValidityAttestation, }; use sp_core::{sr25519, H256}; use sp_runtime::{ @@ -197,7 +194,10 @@ impl BenchBuilder { /// Maximum number of validators per core (a.k.a. max validators per group). This value is used /// if none is explicitly set on the builder. pub(crate) fn fallback_max_validators_per_core() -> u32 { - configuration::Pallet::::config().max_validators_per_core.unwrap_or(5) + configuration::Pallet::::config() + .scheduler_params + .max_validators_per_core + .unwrap_or(5) } /// Specify a mapping of core index/ para id to the number of dispute statements for the @@ -510,7 +510,7 @@ impl BenchBuilder { .iter() .map(|(seed, num_votes)| { assert!(*num_votes <= validators.len() as u32); - let (para_id, _core_idx, group_idx) = self.create_indexes(*seed); + let (para_id, core_idx, group_idx) = self.create_indexes(*seed); // This generates a pair and adds it to the keystore, returning just the public. let collator_public = CollatorId::generate_pair(None); @@ -587,11 +587,18 @@ impl BenchBuilder { }) .collect(); + // Check if the elastic scaling bit is set, if so we need to supply the core index + // in the generated candidate. + let core_idx = configuration::Pallet::::config() + .node_features + .get(FeatureIndex::ElasticScalingMVP as usize) + .map(|_the_bit| core_idx); + BackedCandidate::::new( candidate, validity_votes, bitvec::bitvec![u8, bitvec::order::Lsb0; 1; group_validators.len()], - None, + core_idx, ) }) .collect() @@ -684,7 +691,7 @@ impl BenchBuilder { // We are currently in Session 0, so these changes will take effect in Session 2. Self::setup_para_ids(used_cores); configuration::ActiveConfig::::mutate(|c| { - c.coretime_cores = used_cores; + c.scheduler_params.num_cores = used_cores; }); let validator_ids = Self::generate_validator_pairs(self.max_validators()); @@ -715,8 +722,7 @@ impl BenchBuilder { let cores = (0..used_cores) .into_iter() .map(|i| { - let AssignmentProviderConfig { ttl, .. 
} = - scheduler::Pallet::::assignment_provider_config(CoreIndex(i)); + let ttl = configuration::Pallet::::config().scheduler_params.ttl; // Load an assignment into provider so that one is present to pop let assignment = ::AssignmentProvider::get_mock_assignment( CoreIndex(i), @@ -731,8 +737,7 @@ impl BenchBuilder { let cores = (0..used_cores) .into_iter() .map(|i| { - let AssignmentProviderConfig { ttl, .. } = - scheduler::Pallet::::assignment_provider_config(CoreIndex(i)); + let ttl = configuration::Pallet::::config().scheduler_params.ttl; // Load an assignment into provider so that one is present to pop let assignment = ::AssignmentProvider::get_mock_assignment( diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index 7cc5b31fc8fd1dfbed7a02b293eca2dab4b95cf4..364a15215d38c5eae477b3a73987a43d0216278b 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -29,7 +29,6 @@ use primitives::{ vstaging::{ApprovalVotingParams, NodeFeatures}, AsyncBackingParams, Balance, ExecutorParamError, ExecutorParams, SessionIndex, LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, - ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; use sp_runtime::{traits::Zero, Perbill}; use sp_std::prelude::*; @@ -43,6 +42,7 @@ mod benchmarking; pub mod migration; pub use pallet::*; +use primitives::vstaging::SchedulerParams; const LOG_TARGET: &str = "runtime::configuration"; @@ -118,9 +118,9 @@ pub struct HostConfiguration { /// been completed. /// /// Note, there are situations in which `expected_at` in the past. For example, if - /// [`paras_availability_period`](Self::paras_availability_period) is less than the delay set - /// by this field or if PVF pre-check took more time than the delay. In such cases, the upgrade - /// is further at the earliest possible time determined by + /// [`paras_availability_period`](SchedulerParams::paras_availability_period) is less than the + /// delay set by this field or if PVF pre-check took more time than the delay. In such cases, + /// the upgrade is further at the earliest possible time determined by /// [`minimum_validation_upgrade_delay`](Self::minimum_validation_upgrade_delay). /// /// The rationale for this delay has to do with relay-chain reversions. In case there is an @@ -172,48 +172,7 @@ pub struct HostConfiguration { /// How long to keep code on-chain, in blocks. This should be sufficiently long that disputes /// have concluded. pub code_retention_period: BlockNumber, - /// How many cores are managed by the coretime chain. - pub coretime_cores: u32, - /// The number of retries that a on demand author has to submit their block. - pub on_demand_retries: u32, - /// The maximum queue size of the pay as you go module. - pub on_demand_queue_max_size: u32, - /// The target utilization of the spot price queue in percentages. - pub on_demand_target_queue_utilization: Perbill, - /// How quickly the fee rises in reaction to increased utilization. - /// The lower the number the slower the increase. - pub on_demand_fee_variability: Perbill, - /// The minimum amount needed to claim a slot in the spot pricing queue. - pub on_demand_base_fee: Balance, - /// The number of blocks an on demand claim stays in the scheduler's claimqueue before getting - /// cleared. This number should go reasonably higher than the number of blocks in the async - /// backing lookahead. 
- pub on_demand_ttl: BlockNumber, - /// How often parachain groups should be rotated across parachains. - /// - /// Must be non-zero. - pub group_rotation_frequency: BlockNumber, - /// The minimum availability period, in blocks. - /// - /// This is the minimum amount of blocks after a core became occupied that validators have time - /// to make the block available. - /// - /// This value only has effect on group rotations. If backers backed something at the end of - /// their rotation, the occupied core affects the backing group that comes afterwards. We limit - /// the effect one backing group can have on the next to `paras_availability_period` blocks. - /// - /// Within a group rotation there is no timeout as backers are only affecting themselves. - /// - /// Must be at least 1. With a value of 1, the previous group will not be able to negatively - /// affect the following group at the expense of a tight availability timeline at group - /// rotation boundaries. - pub paras_availability_period: BlockNumber, - /// The amount of blocks ahead to schedule paras. - pub scheduling_lookahead: u32, - /// The maximum number of validators to have per core. - /// - /// `None` means no maximum. - pub max_validators_per_core: Option, + /// The maximum number of validators to use for parachain consensus, period. /// /// `None` means no maximum. @@ -257,7 +216,7 @@ pub struct HostConfiguration { /// scheduled. This number is controlled by this field. /// /// This value should be greater than - /// [`paras_availability_period`](Self::paras_availability_period). + /// [`paras_availability_period`](SchedulerParams::paras_availability_period). pub minimum_validation_upgrade_delay: BlockNumber, /// The minimum number of valid backing statements required to consider a parachain candidate /// backable. @@ -266,6 +225,8 @@ pub struct HostConfiguration { pub node_features: NodeFeatures, /// Params used by approval-voting pub approval_voting_params: ApprovalVotingParams, + /// Scheduler parameters + pub scheduler_params: SchedulerParams, } impl> Default for HostConfiguration { @@ -275,8 +236,6 @@ impl> Default for HostConfiguration> Default for HostConfiguration> Default for HostConfiguration { ZeroMinimumBackingVotes, /// `executor_params` are inconsistent. 
InconsistentExecutorParams { inner: ExecutorParamError }, + /// TTL should be bigger than lookahead + LookaheadExceedsTTL, } impl HostConfiguration @@ -373,11 +326,11 @@ where pub fn check_consistency(&self) -> Result<(), InconsistentError> { use InconsistentError::*; - if self.group_rotation_frequency.is_zero() { + if self.scheduler_params.group_rotation_frequency.is_zero() { return Err(ZeroGroupRotationFrequency) } - if self.paras_availability_period.is_zero() { + if self.scheduler_params.paras_availability_period.is_zero() { return Err(ZeroParasAvailabilityPeriod) } @@ -399,10 +352,11 @@ where return Err(MaxPovSizeExceedHardLimit { max_pov_size: self.max_pov_size }) } - if self.minimum_validation_upgrade_delay <= self.paras_availability_period { + if self.minimum_validation_upgrade_delay <= self.scheduler_params.paras_availability_period + { return Err(MinimumValidationUpgradeDelayLessThanChainAvailabilityPeriod { minimum_validation_upgrade_delay: self.minimum_validation_upgrade_delay.clone(), - paras_availability_period: self.paras_availability_period.clone(), + paras_availability_period: self.scheduler_params.paras_availability_period.clone(), }) } @@ -447,6 +401,10 @@ where return Err(InconsistentExecutorParams { inner }) } + if self.scheduler_params.ttl < self.scheduler_params.lookahead.into() { + return Err(LookaheadExceedsTTL) + } + Ok(()) } @@ -471,6 +429,7 @@ pub trait WeightInfo { fn set_config_with_executor_params() -> Weight; fn set_config_with_perbill() -> Weight; fn set_node_feature() -> Weight; + fn set_config_with_scheduler_params() -> Weight; } pub struct TestWeightInfo; @@ -499,13 +458,16 @@ impl WeightInfo for TestWeightInfo { fn set_node_feature() -> Weight { Weight::MAX } + fn set_config_with_scheduler_params() -> Weight { + Weight::MAX + } } #[frame_support::pallet] pub mod pallet { use super::*; - /// The current storage version. + /// The in-code storage version. /// /// v0-v1: /// v1-v2: @@ -520,7 +482,8 @@ pub mod pallet { /// v8-v9: /// v9-v10: /// v10-11: - const STORAGE_VERSION: StorageVersion = StorageVersion::new(11); + /// v11-12: + const STORAGE_VERSION: StorageVersion = StorageVersion::new(12); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -679,16 +642,16 @@ pub mod pallet { Self::set_coretime_cores_unchecked(new) } - /// Set the number of retries for a particular on demand. 
+ /// Set the max number of times a claim may timeout on a core before it is abandoned #[pallet::call_index(7)] #[pallet::weight(( T::WeightInfo::set_config_with_u32(), DispatchClass::Operational, ))] - pub fn set_on_demand_retries(origin: OriginFor, new: u32) -> DispatchResult { + pub fn set_max_availability_timeouts(origin: OriginFor, new: u32) -> DispatchResult { ensure_root(origin)?; Self::schedule_config_update(|config| { - config.on_demand_retries = new; + config.scheduler_params.max_availability_timeouts = new; }) } @@ -704,7 +667,7 @@ pub mod pallet { ) -> DispatchResult { ensure_root(origin)?; Self::schedule_config_update(|config| { - config.group_rotation_frequency = new; + config.scheduler_params.group_rotation_frequency = new; }) } @@ -720,7 +683,7 @@ pub mod pallet { ) -> DispatchResult { ensure_root(origin)?; Self::schedule_config_update(|config| { - config.paras_availability_period = new; + config.scheduler_params.paras_availability_period = new; }) } @@ -733,7 +696,7 @@ pub mod pallet { pub fn set_scheduling_lookahead(origin: OriginFor, new: u32) -> DispatchResult { ensure_root(origin)?; Self::schedule_config_update(|config| { - config.scheduling_lookahead = new; + config.scheduler_params.lookahead = new; }) } @@ -749,7 +712,7 @@ pub mod pallet { ) -> DispatchResult { ensure_root(origin)?; Self::schedule_config_update(|config| { - config.max_validators_per_core = new; + config.scheduler_params.max_validators_per_core = new; }) } @@ -1141,7 +1104,7 @@ pub mod pallet { pub fn set_on_demand_base_fee(origin: OriginFor, new: Balance) -> DispatchResult { ensure_root(origin)?; Self::schedule_config_update(|config| { - config.on_demand_base_fee = new; + config.scheduler_params.on_demand_base_fee = new; }) } @@ -1154,7 +1117,7 @@ pub mod pallet { pub fn set_on_demand_fee_variability(origin: OriginFor, new: Perbill) -> DispatchResult { ensure_root(origin)?; Self::schedule_config_update(|config| { - config.on_demand_fee_variability = new; + config.scheduler_params.on_demand_fee_variability = new; }) } @@ -1167,7 +1130,7 @@ pub mod pallet { pub fn set_on_demand_queue_max_size(origin: OriginFor, new: u32) -> DispatchResult { ensure_root(origin)?; Self::schedule_config_update(|config| { - config.on_demand_queue_max_size = new; + config.scheduler_params.on_demand_queue_max_size = new; }) } /// Set the on demand (parathreads) fee variability. @@ -1182,7 +1145,7 @@ pub mod pallet { ) -> DispatchResult { ensure_root(origin)?; Self::schedule_config_update(|config| { - config.on_demand_target_queue_utilization = new; + config.scheduler_params.on_demand_target_queue_utilization = new; }) } /// Set the on demand (parathreads) ttl in the claimqueue. @@ -1194,7 +1157,7 @@ pub mod pallet { pub fn set_on_demand_ttl(origin: OriginFor, new: BlockNumberFor) -> DispatchResult { ensure_root(origin)?; Self::schedule_config_update(|config| { - config.on_demand_ttl = new; + config.scheduler_params.ttl = new; }) } @@ -1244,6 +1207,22 @@ pub mod pallet { config.approval_voting_params = new; }) } + + /// Set scheduler-params. + #[pallet::call_index(55)] + #[pallet::weight(( + T::WeightInfo::set_config_with_scheduler_params(), + DispatchClass::Operational, + ))] + pub fn set_scheduler_params( + origin: OriginFor, + new: SchedulerParams>, + ) -> DispatchResult { + ensure_root(origin)?; + Self::schedule_config_update(|config| { + config.scheduler_params = new; + }) + } } impl Pallet { @@ -1252,7 +1231,7 @@ pub mod pallet { /// To be used if authorization is checked otherwise. 
pub fn set_coretime_cores_unchecked(new: u32) -> DispatchResult { Self::schedule_config_update(|config| { - config.coretime_cores = new; + config.scheduler_params.num_cores = new; }) } } @@ -1393,7 +1372,7 @@ impl Pallet { let base_config_consistent = base_config.check_consistency().is_ok(); // Now, we need to decide what the new configuration should be. - // We also move the `base_config` to `new_config` to empahsize that the base config was + // We also move the `base_config` to `new_config` to emphasize that the base config was // destroyed by the `updater`. updater(&mut base_config); let new_config = base_config; diff --git a/polkadot/runtime/parachains/src/configuration/benchmarking.rs b/polkadot/runtime/parachains/src/configuration/benchmarking.rs index 67daf1c459884d42378056b6528605da29578c95..882b5aab096ad2f8227aa7ad0efaddfd2078c3ad 100644 --- a/polkadot/runtime/parachains/src/configuration/benchmarking.rs +++ b/polkadot/runtime/parachains/src/configuration/benchmarking.rs @@ -51,6 +51,8 @@ benchmarks! { set_node_feature{}: set_node_feature(RawOrigin::Root, 255, true) + set_config_with_scheduler_params {} : set_scheduler_params(RawOrigin::Root, SchedulerParams::default()) + impl_benchmark_test_suite!( Pallet, crate::mock::new_test_ext(Default::default()), diff --git a/polkadot/runtime/parachains/src/configuration/migration.rs b/polkadot/runtime/parachains/src/configuration/migration.rs index 2838b73092dbab4a029a684948a85123aa489906..87b30b177e735bdc1ec618a6e403c4c74a801533 100644 --- a/polkadot/runtime/parachains/src/configuration/migration.rs +++ b/polkadot/runtime/parachains/src/configuration/migration.rs @@ -18,6 +18,7 @@ pub mod v10; pub mod v11; +pub mod v12; pub mod v6; pub mod v7; pub mod v8; diff --git a/polkadot/runtime/parachains/src/configuration/migration/v11.rs b/polkadot/runtime/parachains/src/configuration/migration/v11.rs index a69061ac1381e253cdbaef6d4681511973f83e86..f6e0e0431640ba271f592759308cc0d07efcfa71 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v11.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v11.rs @@ -21,13 +21,122 @@ use frame_support::{ migrations::VersionedMigration, pallet_prelude::*, traits::Defensive, weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{vstaging::ApprovalVotingParams, SessionIndex}; +use primitives::{ + vstaging::ApprovalVotingParams, AsyncBackingParams, ExecutorParams, SessionIndex, + LEGACY_MIN_BACKING_VOTES, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, +}; use sp_std::vec::Vec; use frame_support::traits::OnRuntimeUpgrade; +use polkadot_core_primitives::Balance; +use primitives::vstaging::NodeFeatures; +use sp_arithmetic::Perbill; use super::v10::V10HostConfiguration; -type V11HostConfiguration = configuration::HostConfiguration; + +#[derive(Clone, Encode, PartialEq, Decode, Debug)] +pub struct V11HostConfiguration { + pub max_code_size: u32, + pub max_head_data_size: u32, + pub max_upward_queue_count: u32, + pub max_upward_queue_size: u32, + pub max_upward_message_size: u32, + pub max_upward_message_num_per_candidate: u32, + pub hrmp_max_message_num_per_candidate: u32, + pub validation_upgrade_cooldown: BlockNumber, + pub validation_upgrade_delay: BlockNumber, + pub async_backing_params: AsyncBackingParams, + pub max_pov_size: u32, + pub max_downward_message_size: u32, + pub hrmp_max_parachain_outbound_channels: u32, + pub hrmp_sender_deposit: Balance, + pub hrmp_recipient_deposit: Balance, + pub hrmp_channel_max_capacity: u32, + pub hrmp_channel_max_total_size: 
u32, + pub hrmp_max_parachain_inbound_channels: u32, + pub hrmp_channel_max_message_size: u32, + pub executor_params: ExecutorParams, + pub code_retention_period: BlockNumber, + pub coretime_cores: u32, + pub on_demand_retries: u32, + pub on_demand_queue_max_size: u32, + pub on_demand_target_queue_utilization: Perbill, + pub on_demand_fee_variability: Perbill, + pub on_demand_base_fee: Balance, + pub on_demand_ttl: BlockNumber, + pub group_rotation_frequency: BlockNumber, + pub paras_availability_period: BlockNumber, + pub scheduling_lookahead: u32, + pub max_validators_per_core: Option, + pub max_validators: Option, + pub dispute_period: SessionIndex, + pub dispute_post_conclusion_acceptance_period: BlockNumber, + pub no_show_slots: u32, + pub n_delay_tranches: u32, + pub zeroth_delay_tranche_width: u32, + pub needed_approvals: u32, + pub relay_vrf_modulo_samples: u32, + pub pvf_voting_ttl: SessionIndex, + pub minimum_validation_upgrade_delay: BlockNumber, + pub minimum_backing_votes: u32, + pub node_features: NodeFeatures, + pub approval_voting_params: ApprovalVotingParams, +} + +impl> Default for V11HostConfiguration { + fn default() -> Self { + Self { + async_backing_params: AsyncBackingParams { + max_candidate_depth: 0, + allowed_ancestry_len: 0, + }, + group_rotation_frequency: 1u32.into(), + paras_availability_period: 1u32.into(), + no_show_slots: 1u32.into(), + validation_upgrade_cooldown: Default::default(), + validation_upgrade_delay: 2u32.into(), + code_retention_period: Default::default(), + max_code_size: Default::default(), + max_pov_size: Default::default(), + max_head_data_size: Default::default(), + coretime_cores: Default::default(), + on_demand_retries: Default::default(), + scheduling_lookahead: 1, + max_validators_per_core: Default::default(), + max_validators: None, + dispute_period: 6, + dispute_post_conclusion_acceptance_period: 100.into(), + n_delay_tranches: Default::default(), + zeroth_delay_tranche_width: Default::default(), + needed_approvals: Default::default(), + relay_vrf_modulo_samples: Default::default(), + max_upward_queue_count: Default::default(), + max_upward_queue_size: Default::default(), + max_downward_message_size: Default::default(), + max_upward_message_size: Default::default(), + max_upward_message_num_per_candidate: Default::default(), + hrmp_sender_deposit: Default::default(), + hrmp_recipient_deposit: Default::default(), + hrmp_channel_max_capacity: Default::default(), + hrmp_channel_max_total_size: Default::default(), + hrmp_max_parachain_inbound_channels: Default::default(), + hrmp_channel_max_message_size: Default::default(), + hrmp_max_parachain_outbound_channels: Default::default(), + hrmp_max_message_num_per_candidate: Default::default(), + pvf_voting_ttl: 2u32.into(), + minimum_validation_upgrade_delay: 2.into(), + executor_params: Default::default(), + approval_voting_params: ApprovalVotingParams { max_approval_coalesce_count: 1 }, + on_demand_queue_max_size: ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, + on_demand_base_fee: 10_000_000u128, + on_demand_fee_variability: Perbill::from_percent(3), + on_demand_target_queue_utilization: Perbill::from_percent(25), + on_demand_ttl: 5u32.into(), + minimum_backing_votes: LEGACY_MIN_BACKING_VOTES, + node_features: NodeFeatures::EMPTY, + } + } +} mod v10 { use super::*; diff --git a/polkadot/runtime/parachains/src/configuration/migration/v12.rs b/polkadot/runtime/parachains/src/configuration/migration/v12.rs new file mode 100644 index 
0000000000000000000000000000000000000000..4295a79893e8ee52632d54772bd108b4de37cc3f --- /dev/null +++ b/polkadot/runtime/parachains/src/configuration/migration/v12.rs @@ -0,0 +1,349 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A module that is responsible for migration of storage. + +use crate::configuration::{self, migration::v11::V11HostConfiguration, Config, Pallet}; +use frame_support::{ + migrations::VersionedMigration, + pallet_prelude::*, + traits::{Defensive, OnRuntimeUpgrade}, +}; +use frame_system::pallet_prelude::BlockNumberFor; +use primitives::vstaging::SchedulerParams; +use sp_core::Get; +use sp_staking::SessionIndex; +use sp_std::vec::Vec; + +type V12HostConfiguration = configuration::HostConfiguration; + +mod v11 { + use super::*; + + #[frame_support::storage_alias] + pub(crate) type ActiveConfig = + StorageValue, V11HostConfiguration>, OptionQuery>; + + #[frame_support::storage_alias] + pub(crate) type PendingConfigs = StorageValue< + Pallet, + Vec<(SessionIndex, V11HostConfiguration>)>, + OptionQuery, + >; +} + +mod v12 { + use super::*; + + #[frame_support::storage_alias] + pub(crate) type ActiveConfig = + StorageValue, V12HostConfiguration>, OptionQuery>; + + #[frame_support::storage_alias] + pub(crate) type PendingConfigs = StorageValue< + Pallet, + Vec<(SessionIndex, V12HostConfiguration>)>, + OptionQuery, + >; +} + +pub type MigrateToV12 = VersionedMigration< + 11, + 12, + UncheckedMigrateToV12, + Pallet, + ::DbWeight, +>; + +pub struct UncheckedMigrateToV12(sp_std::marker::PhantomData); + +impl OnRuntimeUpgrade for UncheckedMigrateToV12 { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + log::trace!(target: crate::configuration::LOG_TARGET, "Running pre_upgrade() for HostConfiguration MigrateToV12"); + Ok(Vec::new()) + } + + fn on_runtime_upgrade() -> Weight { + log::info!(target: configuration::LOG_TARGET, "HostConfiguration MigrateToV12 started"); + let weight_consumed = migrate_to_v12::(); + + log::info!(target: configuration::LOG_TARGET, "HostConfiguration MigrateToV12 executed successfully"); + + weight_consumed + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + log::trace!(target: crate::configuration::LOG_TARGET, "Running post_upgrade() for HostConfiguration MigrateToV12"); + ensure!( + StorageVersion::get::>() >= 12, + "Storage version should be >= 12 after the migration" + ); + + Ok(()) + } +} + +fn migrate_to_v12() -> Weight { + // Unusual formatting is justified: + // - make it easier to verify that fields assign what they supposed to assign. + // - this code is transient and will be removed after all migrations are done. + // - this code is important enough to optimize for legibility sacrificing consistency. 
+ #[rustfmt::skip] + let translate = + |pre: V11HostConfiguration>| -> + V12HostConfiguration> + { + V12HostConfiguration { + max_code_size : pre.max_code_size, + max_head_data_size : pre.max_head_data_size, + max_upward_queue_count : pre.max_upward_queue_count, + max_upward_queue_size : pre.max_upward_queue_size, + max_upward_message_size : pre.max_upward_message_size, + max_upward_message_num_per_candidate : pre.max_upward_message_num_per_candidate, + hrmp_max_message_num_per_candidate : pre.hrmp_max_message_num_per_candidate, + validation_upgrade_cooldown : pre.validation_upgrade_cooldown, + validation_upgrade_delay : pre.validation_upgrade_delay, + max_pov_size : pre.max_pov_size, + max_downward_message_size : pre.max_downward_message_size, + hrmp_sender_deposit : pre.hrmp_sender_deposit, + hrmp_recipient_deposit : pre.hrmp_recipient_deposit, + hrmp_channel_max_capacity : pre.hrmp_channel_max_capacity, + hrmp_channel_max_total_size : pre.hrmp_channel_max_total_size, + hrmp_max_parachain_inbound_channels : pre.hrmp_max_parachain_inbound_channels, + hrmp_max_parachain_outbound_channels : pre.hrmp_max_parachain_outbound_channels, + hrmp_channel_max_message_size : pre.hrmp_channel_max_message_size, + code_retention_period : pre.code_retention_period, + max_validators : pre.max_validators, + dispute_period : pre.dispute_period, + dispute_post_conclusion_acceptance_period: pre.dispute_post_conclusion_acceptance_period, + no_show_slots : pre.no_show_slots, + n_delay_tranches : pre.n_delay_tranches, + zeroth_delay_tranche_width : pre.zeroth_delay_tranche_width, + needed_approvals : pre.needed_approvals, + relay_vrf_modulo_samples : pre.relay_vrf_modulo_samples, + pvf_voting_ttl : pre.pvf_voting_ttl, + minimum_validation_upgrade_delay : pre.minimum_validation_upgrade_delay, + async_backing_params : pre.async_backing_params, + executor_params : pre.executor_params, + minimum_backing_votes : pre.minimum_backing_votes, + node_features : pre.node_features, + approval_voting_params : pre.approval_voting_params, + scheduler_params: SchedulerParams { + group_rotation_frequency : pre.group_rotation_frequency, + paras_availability_period : pre.paras_availability_period, + max_validators_per_core : pre.max_validators_per_core, + lookahead : pre.scheduling_lookahead, + num_cores : pre.coretime_cores, + max_availability_timeouts : pre.on_demand_retries, + on_demand_queue_max_size : pre.on_demand_queue_max_size, + on_demand_target_queue_utilization : pre.on_demand_target_queue_utilization, + on_demand_fee_variability : pre.on_demand_fee_variability, + on_demand_base_fee : pre.on_demand_base_fee, + ttl : pre.on_demand_ttl, + } + } + }; + + let v11 = v11::ActiveConfig::::get() + .defensive_proof("Could not decode old config") + .unwrap_or_default(); + let v12 = translate(v11); + v12::ActiveConfig::::set(Some(v12)); + + // Allowed to be empty. 
+ let pending_v11 = v11::PendingConfigs::::get().unwrap_or_default(); + let mut pending_v12 = Vec::new(); + + for (session, v11) in pending_v11.into_iter() { + let v12 = translate(v11); + pending_v12.push((session, v12)); + } + v12::PendingConfigs::::set(Some(pending_v12.clone())); + + let num_configs = (pending_v12.len() + 1) as u64; + T::DbWeight::get().reads_writes(num_configs, num_configs) +} + +#[cfg(test)] +mod tests { + use primitives::LEGACY_MIN_BACKING_VOTES; + use sp_arithmetic::Perbill; + + use super::*; + use crate::mock::{new_test_ext, Test}; + + #[test] + fn v12_deserialized_from_actual_data() { + // Example how to get new `raw_config`: + // We'll obtain the raw_config at a specified a block + // Steps: + // 1. Go to Polkadot.js -> Developer -> Chain state -> Storage: https://polkadot.js.org/apps/#/chainstate + // 2. Set these parameters: + // 2.1. selected state query: configuration; activeConfig(): + // PolkadotRuntimeParachainsConfigurationHostConfiguration + // 2.2. blockhash to query at: + // 0xf89d3ab5312c5f70d396dc59612f0aa65806c798346f9db4b35278baed2e0e53 (the hash of + // the block) + // 2.3. Note the value of encoded storage key -> + // 0x06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385 for the + // referenced block. + // 2.4. You'll also need the decoded values to update the test. + // 3. Go to Polkadot.js -> Developer -> Chain state -> Raw storage + // 3.1 Enter the encoded storage key and you get the raw config. + + // This exceeds the maximal line width length, but that's fine, since this is not code and + // doesn't need to be read and also leaving it as one line allows to easily copy it. + let raw_config = + hex_literal::hex![ + "0000300000800000080000000000100000c8000005000000050000000200000002000000000000000000000000005000000010000400000000000000000000000000000000000000000000000000000000000000000000000800000000200000040000000000100000b004000000060000006400000002000000190000000000000002000000020000000200000005000000020000000001000000140000000400000001010000000100000001000000000000001027000080b2e60e80c3c9018096980000000000000000000000000005000000" + ]; + + let v12 = + V12HostConfiguration::::decode(&mut &raw_config[..]).unwrap(); + + // We check only a sample of the values here. If we missed any fields or messed up data + // types that would skew all the fields coming after. + assert_eq!(v12.max_code_size, 3_145_728); + assert_eq!(v12.validation_upgrade_cooldown, 2); + assert_eq!(v12.max_pov_size, 5_242_880); + assert_eq!(v12.hrmp_channel_max_message_size, 1_048_576); + assert_eq!(v12.n_delay_tranches, 25); + assert_eq!(v12.minimum_validation_upgrade_delay, 5); + assert_eq!(v12.minimum_backing_votes, LEGACY_MIN_BACKING_VOTES); + assert_eq!(v12.approval_voting_params.max_approval_coalesce_count, 1); + assert_eq!(v12.scheduler_params.group_rotation_frequency, 20); + assert_eq!(v12.scheduler_params.paras_availability_period, 4); + assert_eq!(v12.scheduler_params.lookahead, 1); + assert_eq!(v12.scheduler_params.num_cores, 1); + assert_eq!(v12.scheduler_params.max_availability_timeouts, 0); + assert_eq!(v12.scheduler_params.on_demand_queue_max_size, 10_000); + assert_eq!( + v12.scheduler_params.on_demand_target_queue_utilization, + Perbill::from_percent(25) + ); + assert_eq!(v12.scheduler_params.on_demand_fee_variability, Perbill::from_percent(3)); + assert_eq!(v12.scheduler_params.on_demand_base_fee, 10_000_000); + assert_eq!(v12.scheduler_params.ttl, 5); + } + + #[test] + fn test_migrate_to_v12() { + // Host configuration has lots of fields. 
However, in this migration we only add one + // field. The most important part to check are a couple of the last fields. We also pick + // extra fields to check arbitrarily, e.g. depending on their position (i.e. the middle) and + // also their type. + // + // We specify only the picked fields and the rest should be provided by the `Default` + // implementation. That implementation is copied over between the two types and should work + // fine. + let v11 = V11HostConfiguration:: { + needed_approvals: 69, + paras_availability_period: 55, + hrmp_recipient_deposit: 1337, + max_pov_size: 1111, + minimum_validation_upgrade_delay: 20, + on_demand_ttl: 3, + on_demand_retries: 10, + ..Default::default() + }; + + let mut pending_configs = Vec::new(); + pending_configs.push((100, v11.clone())); + pending_configs.push((300, v11.clone())); + + new_test_ext(Default::default()).execute_with(|| { + // Implant the v10 version in the state. + v11::ActiveConfig::::set(Some(v11.clone())); + v11::PendingConfigs::::set(Some(pending_configs)); + + migrate_to_v12::(); + + let v12 = v12::ActiveConfig::::get().unwrap(); + assert_eq!(v12.approval_voting_params.max_approval_coalesce_count, 1); + + let mut configs_to_check = v12::PendingConfigs::::get().unwrap(); + configs_to_check.push((0, v12.clone())); + + for (_, v12) in configs_to_check { + #[rustfmt::skip] + { + assert_eq!(v11.max_code_size , v12.max_code_size); + assert_eq!(v11.max_head_data_size , v12.max_head_data_size); + assert_eq!(v11.max_upward_queue_count , v12.max_upward_queue_count); + assert_eq!(v11.max_upward_queue_size , v12.max_upward_queue_size); + assert_eq!(v11.max_upward_message_size , v12.max_upward_message_size); + assert_eq!(v11.max_upward_message_num_per_candidate , v12.max_upward_message_num_per_candidate); + assert_eq!(v11.hrmp_max_message_num_per_candidate , v12.hrmp_max_message_num_per_candidate); + assert_eq!(v11.validation_upgrade_cooldown , v12.validation_upgrade_cooldown); + assert_eq!(v11.validation_upgrade_delay , v12.validation_upgrade_delay); + assert_eq!(v11.max_pov_size , v12.max_pov_size); + assert_eq!(v11.max_downward_message_size , v12.max_downward_message_size); + assert_eq!(v11.hrmp_max_parachain_outbound_channels , v12.hrmp_max_parachain_outbound_channels); + assert_eq!(v11.hrmp_sender_deposit , v12.hrmp_sender_deposit); + assert_eq!(v11.hrmp_recipient_deposit , v12.hrmp_recipient_deposit); + assert_eq!(v11.hrmp_channel_max_capacity , v12.hrmp_channel_max_capacity); + assert_eq!(v11.hrmp_channel_max_total_size , v12.hrmp_channel_max_total_size); + assert_eq!(v11.hrmp_max_parachain_inbound_channels , v12.hrmp_max_parachain_inbound_channels); + assert_eq!(v11.hrmp_channel_max_message_size , v12.hrmp_channel_max_message_size); + assert_eq!(v11.code_retention_period , v12.code_retention_period); + assert_eq!(v11.max_validators , v12.max_validators); + assert_eq!(v11.dispute_period , v12.dispute_period); + assert_eq!(v11.no_show_slots , v12.no_show_slots); + assert_eq!(v11.n_delay_tranches , v12.n_delay_tranches); + assert_eq!(v11.zeroth_delay_tranche_width , v12.zeroth_delay_tranche_width); + assert_eq!(v11.needed_approvals , v12.needed_approvals); + assert_eq!(v11.relay_vrf_modulo_samples , v12.relay_vrf_modulo_samples); + assert_eq!(v11.pvf_voting_ttl , v12.pvf_voting_ttl); + assert_eq!(v11.minimum_validation_upgrade_delay , v12.minimum_validation_upgrade_delay); + assert_eq!(v11.async_backing_params.allowed_ancestry_len, v12.async_backing_params.allowed_ancestry_len); + 
assert_eq!(v11.async_backing_params.max_candidate_depth , v12.async_backing_params.max_candidate_depth); + assert_eq!(v11.executor_params , v12.executor_params); + assert_eq!(v11.minimum_backing_votes , v12.minimum_backing_votes); + assert_eq!(v11.group_rotation_frequency , v12.scheduler_params.group_rotation_frequency); + assert_eq!(v11.paras_availability_period , v12.scheduler_params.paras_availability_period); + assert_eq!(v11.max_validators_per_core , v12.scheduler_params.max_validators_per_core); + assert_eq!(v11.scheduling_lookahead , v12.scheduler_params.lookahead); + assert_eq!(v11.coretime_cores , v12.scheduler_params.num_cores); + assert_eq!(v11.on_demand_retries , v12.scheduler_params.max_availability_timeouts); + assert_eq!(v11.on_demand_queue_max_size , v12.scheduler_params.on_demand_queue_max_size); + assert_eq!(v11.on_demand_target_queue_utilization , v12.scheduler_params.on_demand_target_queue_utilization); + assert_eq!(v11.on_demand_fee_variability , v12.scheduler_params.on_demand_fee_variability); + assert_eq!(v11.on_demand_base_fee , v12.scheduler_params.on_demand_base_fee); + assert_eq!(v11.on_demand_ttl , v12.scheduler_params.ttl); + }; // ; makes this a statement. `rustfmt::skip` cannot be put on an expression. + } + }); + } + + // Test that migration doesn't panic in case there are no pending configurations upgrades in + // pallet's storage. + #[test] + fn test_migrate_to_v12_no_pending() { + let v11 = V11HostConfiguration::::default(); + + new_test_ext(Default::default()).execute_with(|| { + // Implant the v10 version in the state. + v11::ActiveConfig::::set(Some(v11)); + // Ensure there are no pending configs. + v12::PendingConfigs::::set(None); + + // Shouldn't fail. + migrate_to_v12::(); + }); + } +} diff --git a/polkadot/runtime/parachains/src/configuration/tests.rs b/polkadot/runtime/parachains/src/configuration/tests.rs index f1570017dd7ba75db429eac4f00f26fa277de2d6..254511231cac1729981706775bb9f844ca9f1c5c 100644 --- a/polkadot/runtime/parachains/src/configuration/tests.rs +++ b/polkadot/runtime/parachains/src/configuration/tests.rs @@ -226,8 +226,11 @@ fn invariants() { ); ActiveConfig::::put(HostConfiguration { - paras_availability_period: 10, minimum_validation_upgrade_delay: 11, + scheduler_params: SchedulerParams { + paras_availability_period: 10, + ..Default::default() + }, ..Default::default() }); assert_err!( @@ -283,12 +286,6 @@ fn setting_pending_config_members() { max_code_size: 100_000, max_pov_size: 1024, max_head_data_size: 1_000, - coretime_cores: 2, - on_demand_retries: 5, - group_rotation_frequency: 20, - paras_availability_period: 10, - scheduling_lookahead: 3, - max_validators_per_core: None, max_validators: None, dispute_period: 239, dispute_post_conclusion_acceptance_period: 10, @@ -314,13 +311,21 @@ fn setting_pending_config_members() { minimum_validation_upgrade_delay: 20, executor_params: Default::default(), approval_voting_params: ApprovalVotingParams { max_approval_coalesce_count: 1 }, - on_demand_queue_max_size: 10_000u32, - on_demand_base_fee: 10_000_000u128, - on_demand_fee_variability: Perbill::from_percent(3), - on_demand_target_queue_utilization: Perbill::from_percent(25), - on_demand_ttl: 5u32, minimum_backing_votes: 5, node_features: bitvec![u8, Lsb0; 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], + scheduler_params: SchedulerParams { + group_rotation_frequency: 20, + paras_availability_period: 10, + max_validators_per_core: None, + lookahead: 3, + num_cores: 2, + max_availability_timeouts: 5, + on_demand_queue_max_size: 10_000u32, + 
on_demand_base_fee: 10_000_000u128, + on_demand_fee_variability: Perbill::from_percent(3), + on_demand_target_queue_utilization: Perbill::from_percent(25), + ttl: 5u32, + }, }; Configuration::set_validation_upgrade_cooldown( @@ -342,13 +347,19 @@ fn setting_pending_config_members() { Configuration::set_max_pov_size(RuntimeOrigin::root(), new_config.max_pov_size).unwrap(); Configuration::set_max_head_data_size(RuntimeOrigin::root(), new_config.max_head_data_size) .unwrap(); - Configuration::set_coretime_cores(RuntimeOrigin::root(), new_config.coretime_cores) - .unwrap(); - Configuration::set_on_demand_retries(RuntimeOrigin::root(), new_config.on_demand_retries) - .unwrap(); + Configuration::set_coretime_cores( + RuntimeOrigin::root(), + new_config.scheduler_params.num_cores, + ) + .unwrap(); + Configuration::set_max_availability_timeouts( + RuntimeOrigin::root(), + new_config.scheduler_params.max_availability_timeouts, + ) + .unwrap(); Configuration::set_group_rotation_frequency( RuntimeOrigin::root(), - new_config.group_rotation_frequency, + new_config.scheduler_params.group_rotation_frequency, ) .unwrap(); // This comes out of order to satisfy the validity criteria for the chain and thread @@ -360,17 +371,17 @@ fn setting_pending_config_members() { .unwrap(); Configuration::set_paras_availability_period( RuntimeOrigin::root(), - new_config.paras_availability_period, + new_config.scheduler_params.paras_availability_period, ) .unwrap(); Configuration::set_scheduling_lookahead( RuntimeOrigin::root(), - new_config.scheduling_lookahead, + new_config.scheduler_params.lookahead, ) .unwrap(); Configuration::set_max_validators_per_core( RuntimeOrigin::root(), - new_config.max_validators_per_core, + new_config.scheduler_params.max_validators_per_core, ) .unwrap(); Configuration::set_max_validators(RuntimeOrigin::root(), new_config.max_validators) diff --git a/polkadot/runtime/parachains/src/coretime/migration.rs b/polkadot/runtime/parachains/src/coretime/migration.rs index 9bc0a20ef5b4afce7d63da9bb1d231d39bf445ab..03fecc58570fba65df79ca48930acb3346d3f556 100644 --- a/polkadot/runtime/parachains/src/coretime/migration.rs +++ b/polkadot/runtime/parachains/src/coretime/migration.rs @@ -114,7 +114,7 @@ mod v_coretime { let legacy_paras = paras::Parachains::::get(); let config = >::config(); - let total_core_count = config.coretime_cores + legacy_paras.len() as u32; + let total_core_count = config.scheduler_params.num_cores + legacy_paras.len() as u32; let dmp_queue_size = crate::dmp::Pallet::::dmq_contents(T::BrokerId::get().into()).len() as u32; @@ -150,7 +150,7 @@ mod v_coretime { // Migrate to Coretime. // - // NOTE: Also migrates coretime_cores config value in configuration::ActiveConfig. + // NOTE: Also migrates `num_cores` config value in configuration::ActiveConfig. 
fn migrate_to_coretime< T: Config, SendXcm: xcm::v4::SendXcm, @@ -176,8 +176,8 @@ mod v_coretime { } let config = >::config(); - // coretime_cores was on_demand_cores until now: - for on_demand in 0..config.coretime_cores { + // num_cores was on_demand_cores until now: + for on_demand in 0..config.scheduler_params.num_cores { let core = CoreIndex(legacy_count.saturating_add(on_demand as _)); let r = assigner_coretime::Pallet::::assign_core( core, @@ -189,9 +189,9 @@ mod v_coretime { log::error!("Creating assignment for existing on-demand core, failed: {:?}", err); } } - let total_cores = config.coretime_cores + legacy_count; + let total_cores = config.scheduler_params.num_cores + legacy_count; configuration::ActiveConfig::::mutate(|c| { - c.coretime_cores = total_cores; + c.scheduler_params.num_cores = total_cores; }); if let Err(err) = migrate_send_assignments_to_coretime_chain::() { @@ -200,7 +200,9 @@ mod v_coretime { let single_weight = ::WeightInfo::assign_core(1); single_weight - .saturating_mul(u64::from(legacy_count.saturating_add(config.coretime_cores))) + .saturating_mul(u64::from( + legacy_count.saturating_add(config.scheduler_params.num_cores), + )) // Second read from sending assignments to the coretime chain. .saturating_add(T::DbWeight::get().reads_writes(2, 1)) } @@ -244,7 +246,8 @@ mod v_coretime { Some(mk_coretime_call(crate::coretime::CoretimeCalls::SetLease(p.into(), time_slice))) }); - let core_count: u16 = configuration::Pallet::::config().coretime_cores.saturated_into(); + let core_count: u16 = + configuration::Pallet::::config().scheduler_params.num_cores.saturated_into(); let set_core_count = iter::once(mk_coretime_call( crate::coretime::CoretimeCalls::NotifyCoreCount(core_count), )); diff --git a/polkadot/runtime/parachains/src/coretime/mod.rs b/polkadot/runtime/parachains/src/coretime/mod.rs index 531f5c2e4e470095989bbb73429cc2180ff4f321..eb9646d7e869d35763a5b0cdf8a408de66126b78 100644 --- a/polkadot/runtime/parachains/src/coretime/mod.rs +++ b/polkadot/runtime/parachains/src/coretime/mod.rs @@ -214,8 +214,8 @@ impl Pallet { } pub fn initializer_on_new_session(notification: &SessionChangeNotification>) { - let old_core_count = notification.prev_config.coretime_cores; - let new_core_count = notification.new_config.coretime_cores; + let old_core_count = notification.prev_config.scheduler_params.num_cores; + let new_core_count = notification.new_config.scheduler_params.num_cores; if new_core_count != old_core_count { let core_count: u16 = new_core_count.saturated_into(); let message = Xcm(vec![ diff --git a/polkadot/runtime/parachains/src/disputes.rs b/polkadot/runtime/parachains/src/disputes.rs index c2383dad3053882b7abec959446b23d149aa4a5f..da95b060c14a3fad03e98f5e2a75ac54eef1928c 100644 --- a/polkadot/runtime/parachains/src/disputes.rs +++ b/polkadot/runtime/parachains/src/disputes.rs @@ -379,7 +379,7 @@ pub mod pallet { type WeightInfo: WeightInfo; } - /// The current storage version. + /// The in-code storage version. 
const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index d2b5a67c3e4538640b559bbf218539c122bf9d67..3fe7d7f0c7d4698c6b8301a41dca552e9c067509 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -47,10 +47,10 @@ use test_helpers::{dummy_collator, dummy_collator_signature, dummy_validation_co fn default_config() -> HostConfiguration { let mut config = HostConfiguration::default(); - config.coretime_cores = 1; + config.scheduler_params.num_cores = 1; config.max_code_size = 0b100000; config.max_head_data_size = 0b100000; - config.group_rotation_frequency = u32::MAX; + config.scheduler_params.group_rotation_frequency = u32::MAX; config } @@ -224,7 +224,7 @@ pub(crate) fn run_to_block( } pub(crate) fn expected_bits() -> usize { - Paras::parachains().len() + Configuration::config().coretime_cores as usize + Paras::parachains().len() + Configuration::config().scheduler_params.num_cores as usize } fn default_bitfield() -> AvailabilityBitfield { @@ -386,7 +386,7 @@ fn collect_pending_cleans_up_pending() { (thread_a, ParaKind::Parathread), ]; let mut config = genesis_config(paras); - config.configuration.config.group_rotation_frequency = 3; + config.configuration.config.scheduler_params.group_rotation_frequency = 3; new_test_ext(config).execute_with(|| { let default_candidate = TestCandidateBuilder::default().build(); >::insert( @@ -2062,7 +2062,7 @@ fn check_allowed_relay_parents() { } let validator_public = validator_pubkeys(&validators); let mut config = genesis_config(paras); - config.configuration.config.group_rotation_frequency = 1; + config.configuration.config.scheduler_params.group_rotation_frequency = 1; new_test_ext(config).execute_with(|| { shared::Pallet::::set_active_validators_ascending(validator_public.clone()); diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index 1925ca19501accce8148e23d5e34c8f6625097a2..e18be03bdeb230d8959e3a1e76828d499c1c52bb 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -23,7 +23,7 @@ use crate::{ initializer, origin, paras, paras::ParaKind, paras_inherent, scheduler, - scheduler::common::{AssignmentProvider, AssignmentProviderConfig}, + scheduler::common::AssignmentProvider, session_info, shared, ParaId, }; use frame_support::pallet_prelude::*; @@ -463,10 +463,6 @@ pub mod mock_assigner { pub(super) type MockAssignmentQueue = StorageValue<_, VecDeque, ValueQuery>; - #[pallet::storage] - pub(super) type MockProviderConfig = - StorageValue<_, AssignmentProviderConfig, OptionQuery>; - #[pallet::storage] pub(super) type MockCoreCount = StorageValue<_, u32, OptionQuery>; } @@ -478,12 +474,6 @@ pub mod mock_assigner { MockAssignmentQueue::::mutate(|queue| queue.push_back(assignment)); } - // This configuration needs to be customized to service `get_provider_config` in - // scheduler tests. - pub fn set_assignment_provider_config(config: AssignmentProviderConfig) { - MockProviderConfig::::set(Some(config)); - } - // Allows for customized core count in scheduler tests, rather than a core count // derived from on-demand config + parachain count. pub fn set_core_count(count: u32) { @@ -512,17 +502,6 @@ pub mod mock_assigner { // in the mock assigner. 
fn push_back_assignment(_assignment: Assignment) {} - // Gets the provider config we set earlier using `set_assignment_provider_config`, falling - // back to the on demand parachain configuration if none was set. - fn get_provider_config(_core_idx: CoreIndex) -> AssignmentProviderConfig { - match MockProviderConfig::::get() { - Some(config) => config, - None => AssignmentProviderConfig { - max_availability_timeouts: 1, - ttl: BlockNumber::from(5u32), - }, - } - } #[cfg(any(feature = "runtime-benchmarks", test))] fn get_mock_assignment(_: CoreIndex, para_id: ParaId) -> Assignment { Assignment::Bulk(para_id) diff --git a/polkadot/runtime/parachains/src/paras/tests.rs b/polkadot/runtime/parachains/src/paras/tests.rs index 24ea919ec8754a50d6fb9507739a48c8a3eb1120..262ec9d3fdba95c0368d4cfe03b288ccaeb049d1 100644 --- a/polkadot/runtime/parachains/src/paras/tests.rs +++ b/polkadot/runtime/parachains/src/paras/tests.rs @@ -17,7 +17,7 @@ use super::*; use frame_support::{assert_err, assert_ok, assert_storage_noop}; use keyring::Sr25519Keyring; -use primitives::{BlockNumber, PARACHAIN_KEY_TYPE_ID}; +use primitives::{vstaging::SchedulerParams, BlockNumber, PARACHAIN_KEY_TYPE_ID}; use sc_keystore::LocalKeystore; use sp_keystore::{Keystore, KeystorePtr}; use std::sync::Arc; @@ -909,7 +909,10 @@ fn full_parachain_cleanup_storage() { minimum_validation_upgrade_delay: 2, // Those are not relevant to this test. However, HostConfiguration is still a // subject for the consistency check. - paras_availability_period: 1, + scheduler_params: SchedulerParams { + paras_availability_period: 1, + ..Default::default() + }, ..Default::default() }, }, diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs index defb2f4404f56c4266c653c4c3242bb68702649b..b7285ec884ad1e6fd326164a1e65f35962214859 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs @@ -27,13 +27,14 @@ mod enter { builder::{Bench, BenchBuilder}, mock::{mock_assigner, new_test_ext, BlockLength, BlockWeights, MockGenesisConfig, Test}, scheduler::{ - common::{Assignment, AssignmentProvider, AssignmentProviderConfig}, + common::{Assignment, AssignmentProvider}, ParasEntry, }, }; use assert_matches::assert_matches; use frame_support::assert_ok; use frame_system::limits; + use primitives::vstaging::SchedulerParams; use sp_runtime::Perbill; use sp_std::collections::btree_map::BTreeMap; @@ -87,7 +88,7 @@ mod enter { // `create_inherent` and will not cause `enter` to early. fn include_backed_candidates() { let config = MockGenesisConfig::default(); - assert!(config.configuration.config.scheduling_lookahead > 0); + assert!(config.configuration.config.scheduler_params.lookahead > 0); new_test_ext(config).execute_with(|| { let dispute_statements = BTreeMap::new(); @@ -625,7 +626,7 @@ mod enter { #[test] fn limit_candidates_over_weight_1() { let config = MockGenesisConfig::default(); - assert!(config.configuration.config.scheduling_lookahead > 0); + assert!(config.configuration.config.scheduler_params.lookahead > 0); new_test_ext(config).execute_with(|| { // Create the inherent data for this block @@ -706,8 +707,8 @@ mod enter { let cores = (0..used_cores) .into_iter() .map(|i| { - let AssignmentProviderConfig { ttl, .. } = - scheduler::Pallet::::assignment_provider_config(CoreIndex(i)); + let SchedulerParams { ttl, .. 
} = + >::config().scheduler_params; // Load an assignment into provider so that one is present to pop let assignment = ::AssignmentProvider::get_mock_assignment( diff --git a/polkadot/runtime/parachains/src/scheduler.rs b/polkadot/runtime/parachains/src/scheduler.rs index 3dbb050fb4e5e40dc6f7b512bf55c45368a3ac5d..61ab36dbe96c89f0a94f659a444685550b0941c0 100644 --- a/polkadot/runtime/parachains/src/scheduler.rs +++ b/polkadot/runtime/parachains/src/scheduler.rs @@ -51,7 +51,7 @@ use sp_std::{ pub mod common; -use common::{Assignment, AssignmentProvider, AssignmentProviderConfig}; +use common::{Assignment, AssignmentProvider}; pub use pallet::*; @@ -222,7 +222,7 @@ impl Pallet { let n_cores = core::cmp::max( T::AssignmentProvider::session_core_count(), - match config.max_validators_per_core { + match config.scheduler_params.max_validators_per_core { Some(x) if x != 0 => validators.len() as u32 / x, _ => 0, }, @@ -350,6 +350,7 @@ impl Pallet { fn drop_expired_claims_from_claimqueue() { let now = >::block_number(); let availability_cores = AvailabilityCores::::get(); + let ttl = >::config().scheduler_params.ttl; ClaimQueue::::mutate(|cq| { for (idx, _) in (0u32..).zip(availability_cores) { @@ -382,8 +383,6 @@ impl Pallet { if let Some(assignment) = T::AssignmentProvider::pop_assignment_for_core(core_idx) { - let AssignmentProviderConfig { ttl, .. } = - T::AssignmentProvider::get_provider_config(core_idx); core_claimqueue.push_back(ParasEntry::new(assignment, now + ttl)); } } @@ -428,7 +427,7 @@ impl Pallet { } let rotations_since_session_start: BlockNumberFor = - (at - session_start_block) / config.group_rotation_frequency; + (at - session_start_block) / config.scheduler_params.group_rotation_frequency; let rotations_since_session_start = as TryInto>::try_into(rotations_since_session_start) @@ -460,9 +459,9 @@ impl Pallet { // Note: blocks backed in this rotation will never time out here as backed_in + // config.paras_availability_period will always be > now for these blocks, as // otherwise above condition would not be true. - pending_since + config.paras_availability_period + pending_since + config.scheduler_params.paras_availability_period } else { - next_rotation + config.paras_availability_period + next_rotation + config.scheduler_params.paras_availability_period }; AvailabilityTimeoutStatus { timed_out: time_out_at <= now, live_until: time_out_at } @@ -478,7 +477,8 @@ impl Pallet { let now = >::block_number() + One::one(); let rotation_info = Self::group_rotation_info(now); - let current_window = rotation_info.last_rotation_at() + config.paras_availability_period; + let current_window = + rotation_info.last_rotation_at() + config.scheduler_params.paras_availability_period; now < current_window } @@ -488,7 +488,7 @@ impl Pallet { ) -> GroupRotationInfo> { let session_start_block = Self::session_start_block(); let group_rotation_frequency = - >::config().group_rotation_frequency; + >::config().scheduler_params.group_rotation_frequency; GroupRotationInfo { session_start_block, now, group_rotation_frequency } } @@ -508,6 +508,8 @@ impl Pallet { /// Return the next thing that will be scheduled on this core assuming it is currently /// occupied and the candidate occupying it times out. 
pub(crate) fn next_up_on_time_out(core: CoreIndex) -> Option { + let max_availability_timeouts = + >::config().scheduler_params.max_availability_timeouts; Self::next_up_on_available(core).or_else(|| { // Or, if none, the claim currently occupying the core, // as it would be put back on the queue after timing out if number of retries is not at @@ -515,16 +517,12 @@ impl Pallet { let cores = AvailabilityCores::::get(); cores.get(core.0 as usize).and_then(|c| match c { CoreOccupied::Free => None, - CoreOccupied::Paras(pe) => { - let AssignmentProviderConfig { max_availability_timeouts, .. } = - T::AssignmentProvider::get_provider_config(core); - + CoreOccupied::Paras(pe) => if pe.availability_timeouts < max_availability_timeouts { Some(Self::paras_entry_to_scheduled_core(pe)) } else { None - } - }, + }, }) }) } @@ -566,7 +564,7 @@ impl Pallet { // ClaimQueue related functions // fn claimqueue_lookahead() -> u32 { - >::config().scheduling_lookahead + >::config().scheduler_params.lookahead } /// Frees cores and fills the free claimqueue spots by popping from the `AssignmentProvider`. @@ -585,15 +583,15 @@ impl Pallet { let n_lookahead = Self::claimqueue_lookahead().max(1); let n_session_cores = T::AssignmentProvider::session_core_count(); let cq = ClaimQueue::::get(); - let ttl = >::config().on_demand_ttl; + let config = >::config(); + let max_availability_timeouts = config.scheduler_params.max_availability_timeouts; + let ttl = config.scheduler_params.ttl; for core_idx in 0..n_session_cores { let core_idx = CoreIndex::from(core_idx); // add previously timedout paras back into the queue if let Some(mut entry) = timedout_paras.remove(&core_idx) { - let AssignmentProviderConfig { max_availability_timeouts, .. } = - T::AssignmentProvider::get_provider_config(core_idx); if entry.availability_timeouts < max_availability_timeouts { // Increment the timeout counter. entry.availability_timeouts += 1; @@ -668,13 +666,6 @@ impl Pallet { .filter_map(|(core_idx, v)| v.front().map(|e| (core_idx, e.assignment.para_id()))) } - #[cfg(any(feature = "runtime-benchmarks", test))] - pub(crate) fn assignment_provider_config( - core_idx: CoreIndex, - ) -> AssignmentProviderConfig> { - T::AssignmentProvider::get_provider_config(core_idx) - } - #[cfg(any(feature = "try-runtime", test))] fn claimqueue_len() -> usize { ClaimQueue::::get().iter().map(|la_vec| la_vec.1.len()).sum() diff --git a/polkadot/runtime/parachains/src/scheduler/common.rs b/polkadot/runtime/parachains/src/scheduler/common.rs index 2eb73385803c6e62a88fc55526e3ba18218cf06d..66a4e6d30be0830650295d5311b7d7e1fc3b1d20 100644 --- a/polkadot/runtime/parachains/src/scheduler/common.rs +++ b/polkadot/runtime/parachains/src/scheduler/common.rs @@ -48,22 +48,10 @@ impl Assignment { } } -#[derive(Encode, Decode, TypeInfo)] -/// A set of variables required by the scheduler in order to operate. -pub struct AssignmentProviderConfig { - /// How many times a collation can time out on availability. - /// Zero timeouts still means that a collation can be provided as per the slot auction - /// assignment provider. - pub max_availability_timeouts: u32, - - /// How long the collator has to provide a collation to the backing group before being dropped. - pub ttl: BlockNumber, -} - pub trait AssignmentProvider { /// Pops an [`Assignment`] from the provider for a specified [`CoreIndex`]. /// - /// This is where assignments come into existance. + /// This is where assignments come into existence. 
fn pop_assignment_for_core(core_idx: CoreIndex) -> Option; /// A previously popped `Assignment` has been fully processed. @@ -77,14 +65,11 @@ pub trait AssignmentProvider { /// Push back a previously popped assignment. /// /// If the assignment could not be processed within the current session, it can be pushed back - /// to the assignment provider in order to be poppped again later. + /// to the assignment provider in order to be popped again later. /// /// This is the second way the life of an assignment can come to an end. fn push_back_assignment(assignment: Assignment); - /// Returns a set of variables needed by the scheduler - fn get_provider_config(core_idx: CoreIndex) -> AssignmentProviderConfig; - /// Push some assignment for mocking/benchmarks purposes. /// /// Useful for benchmarks and testing. The returned assignment is "valid" and can if need be diff --git a/polkadot/runtime/parachains/src/scheduler/tests.rs b/polkadot/runtime/parachains/src/scheduler/tests.rs index 9af23ce64bd67ab0901dd1a03e849d51cfffe342..28b3a6b4f5d61cd2e1934b6fa723e5abe5c8bd4b 100644 --- a/polkadot/runtime/parachains/src/scheduler/tests.rs +++ b/polkadot/runtime/parachains/src/scheduler/tests.rs @@ -18,7 +18,9 @@ use super::*; use frame_support::assert_ok; use keyring::Sr25519Keyring; -use primitives::{BlockNumber, SessionIndex, ValidationCode, ValidatorId}; +use primitives::{ + vstaging::SchedulerParams, BlockNumber, SessionIndex, ValidationCode, ValidatorId, +}; use sp_std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; use crate::{ @@ -103,15 +105,19 @@ fn run_to_end_of_block( fn default_config() -> HostConfiguration { HostConfiguration { - coretime_cores: 3, - group_rotation_frequency: 10, - paras_availability_period: 3, - scheduling_lookahead: 2, // This field does not affect anything that scheduler does. However, `HostConfiguration` // is still a subject to consistency test. It requires that // `minimum_validation_upgrade_delay` is greater than `chain_availability_period` and // `thread_availability_period`. minimum_validation_upgrade_delay: 6, + scheduler_params: SchedulerParams { + group_rotation_frequency: 10, + paras_availability_period: 3, + lookahead: 2, + num_cores: 3, + max_availability_timeouts: 1, + ..Default::default() + }, ..Default::default() } } @@ -155,7 +161,7 @@ fn scheduled_entries() -> impl Iterator(vec![para_a])); } else { assert!(!claimqueue_contains_para_ids::(vec![para_a])); @@ -822,7 +825,7 @@ fn on_demand_claims_are_pruned_after_timing_out() { assert!(!availability_cores_contains_para_ids::(vec![para_a])); // #25 - now += max_retries + 2; + now += max_timeouts + 2; // Add assignment back to the mix. MockAssigner::add_test_assignment(assignment_a.clone()); @@ -833,7 +836,7 @@ fn on_demand_claims_are_pruned_after_timing_out() { // #26 now += 1; // Run to block #n but this time have group 1 conclude the availabilty. - for n in now..=(now + max_retries + 1) { + for n in now..=(now + max_timeouts + 1) { // #n run_to_block(n, |_| None); // Time out core 0 if group 0 is assigned to it, if group 1 is assigned, conclude. @@ -874,10 +877,8 @@ fn on_demand_claims_are_pruned_after_timing_out() { fn availability_predicate_works() { let genesis_config = genesis_config(&default_config()); - let HostConfiguration { group_rotation_frequency, paras_availability_period, .. } = - default_config(); - - assert!(paras_availability_period < group_rotation_frequency); + let SchedulerParams { group_rotation_frequency, paras_availability_period, .. 
} = + default_config().scheduler_params; new_test_ext(genesis_config).execute_with(|| { run_to_block(1 + paras_availability_period, |_| None); @@ -1044,7 +1045,7 @@ fn next_up_on_time_out_reuses_claim_if_nothing_queued() { #[test] fn session_change_requires_reschedule_dropping_removed_paras() { let mut config = default_config(); - config.scheduling_lookahead = 1; + config.scheduler_params.lookahead = 1; let genesis_config = genesis_config(&config); let para_a = ParaId::from(1_u32); @@ -1056,7 +1057,7 @@ fn session_change_requires_reschedule_dropping_removed_paras() { new_test_ext(genesis_config).execute_with(|| { // Setting explicit core count MockAssigner::set_core_count(5); - let assignment_provider_ttl = MockAssigner::get_provider_config(CoreIndex::from(0)).ttl; + let coretime_ttl = >::config().scheduler_params.ttl; schedule_blank_para(para_a); schedule_blank_para(para_b); @@ -1120,7 +1121,7 @@ fn session_change_requires_reschedule_dropping_removed_paras() { vec![ParasEntry::new( Assignment::Bulk(para_a), // At end of block 2 - assignment_provider_ttl + 2 + coretime_ttl + 2 )] .into_iter() .collect() @@ -1169,7 +1170,7 @@ fn session_change_requires_reschedule_dropping_removed_paras() { vec![ParasEntry::new( Assignment::Bulk(para_a), // At block 3 - assignment_provider_ttl + 3 + coretime_ttl + 3 )] .into_iter() .collect() @@ -1179,7 +1180,7 @@ fn session_change_requires_reschedule_dropping_removed_paras() { vec![ParasEntry::new( Assignment::Bulk(para_b), // At block 3 - assignment_provider_ttl + 3 + coretime_ttl + 3 )] .into_iter() .collect() diff --git a/polkadot/runtime/parachains/src/session_info/migration.rs b/polkadot/runtime/parachains/src/session_info/migration.rs index 228c1e3bb2515b14f5fe59a89578b03be065955d..ea6f81834b5db6eb7336521d3d471a6c3df49985 100644 --- a/polkadot/runtime/parachains/src/session_info/migration.rs +++ b/polkadot/runtime/parachains/src/session_info/migration.rs @@ -18,5 +18,5 @@ use frame_support::traits::StorageVersion; -/// The current storage version. +/// The in-code storage version. 
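The test updates above also double as a map of where each former flat `HostConfiguration` field now lives. A self-contained sketch using the `default_config()` values from `scheduler/tests.rs`; the struct below is a local stand-in, not the real `primitives::vstaging::SchedulerParams`, and the field list covers only what this diff touches:

```rust
#[derive(Default)]
struct SchedulerParams {
	group_rotation_frequency: u32,        // was HostConfiguration::group_rotation_frequency
	paras_availability_period: u32,       // was HostConfiguration::paras_availability_period
	lookahead: u32,                       // was HostConfiguration::scheduling_lookahead
	num_cores: u32,                       // was HostConfiguration::coretime_cores
	max_availability_timeouts: u32,       // was AssignmentProviderConfig::max_availability_timeouts
	ttl: u32,                             // was AssignmentProviderConfig::ttl / on_demand_ttl
	max_validators_per_core: Option<u32>, // was HostConfiguration::max_validators_per_core
}

fn main() {
	// Mirrors `default_config()` in scheduler/tests.rs after this change.
	let params = SchedulerParams {
		group_rotation_frequency: 10,
		paras_availability_period: 3,
		lookahead: 2,
		num_cores: 3,
		max_availability_timeouts: 1,
		..Default::default()
	};
	// Same invariant the `availability_predicate_works` test asserts above.
	assert!(params.paras_availability_period < params.group_rotation_frequency);
}
```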
pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); diff --git a/polkadot/runtime/parachains/src/session_info/tests.rs b/polkadot/runtime/parachains/src/session_info/tests.rs index 92a50575deda8413abb18f892680d693c148d0cb..a5bfeae0745599c00de718684420fc377a9184f1 100644 --- a/polkadot/runtime/parachains/src/session_info/tests.rs +++ b/polkadot/runtime/parachains/src/session_info/tests.rs @@ -25,7 +25,7 @@ use crate::{ util::take_active_subset, }; use keyring::Sr25519Keyring; -use primitives::{BlockNumber, ValidatorId, ValidatorIndex}; +use primitives::{vstaging::SchedulerParams, BlockNumber, ValidatorId, ValidatorIndex}; fn run_to_block( to: BlockNumber, @@ -62,9 +62,9 @@ fn run_to_block( fn default_config() -> HostConfiguration { HostConfiguration { - coretime_cores: 1, dispute_period: 2, needed_approvals: 3, + scheduler_params: SchedulerParams { num_cores: 1, ..Default::default() }, ..Default::default() } } diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index 3dc59cc172817409bd607f5b68e52163a7b9afdb..61ca1635ac9567cfdebbb05c4e92c97489387a91 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -246,6 +246,7 @@ runtime-benchmarks = [ "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-tips/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", diff --git a/polkadot/runtime/rococo/constants/src/weights/block_weights.rs b/polkadot/runtime/rococo/constants/src/weights/block_weights.rs index e2aa4a6cab7febc90418c3222819109e256a18c8..f7dc2f19316d5e6b13deb76044fb0ebf43b25d3b 100644 --- a/polkadot/runtime/rococo/constants/src/weights/block_weights.rs +++ b/polkadot/runtime/rococo/constants/src/weights/block_weights.rs @@ -14,13 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26 (Y/M/D) -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29 (Y/M/D) +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! //! SHORT-NAME: `block`, LONG-NAME: `BlockExecution`, RUNTIME: `Development` //! WARMUPS: `10`, REPEAT: `100` -//! WEIGHT-PATH: `runtime/rococo/constants/src/weights/` +//! WEIGHT-PATH: `./polkadot/runtime/rococo/constants/src/weights/` //! WEIGHT-METRIC: `Average`, WEIGHT-MUL: `1.0`, WEIGHT-ADD: `0` // Executed Command: @@ -28,12 +28,11 @@ // benchmark // overhead // --chain=rococo-dev -// --execution=wasm // --wasm-execution=compiled -// --weight-path=runtime/rococo/constants/src/weights/ +// --weight-path=./polkadot/runtime/rococo/constants/src/weights/ // --warmup=10 // --repeat=100 -// --header=./file_header.txt +// --header=./polkadot/file_header.txt use sp_core::parameter_types; use sp_weights::{constants::WEIGHT_REF_TIME_PER_NANOS, Weight}; @@ -43,17 +42,17 @@ parameter_types! { /// Calculated by multiplying the *Average* with `1.0` and adding `0`. 
/// /// Stats nanoseconds: - /// Min, Max: 408_659, 450_716 - /// Average: 417_412 - /// Median: 411_177 - /// Std-Dev: 12242.31 + /// Min, Max: 440_142, 476_907 + /// Average: 450_240 + /// Median: 448_633 + /// Std-Dev: 7301.18 /// /// Percentiles nanoseconds: - /// 99th: 445_142 - /// 95th: 442_275 - /// 75th: 414_217 + /// 99th: 470_733 + /// 95th: 465_082 + /// 75th: 452_536 pub const BlockExecutionWeight: Weight = - Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(417_412), 0); + Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(450_240), 0); } #[cfg(test)] diff --git a/polkadot/runtime/rococo/constants/src/weights/extrinsic_weights.rs b/polkadot/runtime/rococo/constants/src/weights/extrinsic_weights.rs index adce840ebbc120a4e7905ad6312b9d27b1e8d3fb..000cee8a237c3ef306e3356f9733422765b67306 100644 --- a/polkadot/runtime/rococo/constants/src/weights/extrinsic_weights.rs +++ b/polkadot/runtime/rococo/constants/src/weights/extrinsic_weights.rs @@ -14,13 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26 (Y/M/D) -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29 (Y/M/D) +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! //! SHORT-NAME: `extrinsic`, LONG-NAME: `ExtrinsicBase`, RUNTIME: `Development` //! WARMUPS: `10`, REPEAT: `100` -//! WEIGHT-PATH: `runtime/rococo/constants/src/weights/` +//! WEIGHT-PATH: `./polkadot/runtime/rococo/constants/src/weights/` //! WEIGHT-METRIC: `Average`, WEIGHT-MUL: `1.0`, WEIGHT-ADD: `0` // Executed Command: @@ -28,12 +28,11 @@ // benchmark // overhead // --chain=rococo-dev -// --execution=wasm // --wasm-execution=compiled -// --weight-path=runtime/rococo/constants/src/weights/ +// --weight-path=./polkadot/runtime/rococo/constants/src/weights/ // --warmup=10 // --repeat=100 -// --header=./file_header.txt +// --header=./polkadot/file_header.txt use sp_core::parameter_types; use sp_weights::{constants::WEIGHT_REF_TIME_PER_NANOS, Weight}; @@ -43,17 +42,17 @@ parameter_types! { /// Calculated by multiplying the *Average* with `1.0` and adding `0`. 
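As a sanity check on the regenerated constant, the doc comment's formula ("multiplying the *Average* with `1.0` and adding `0`") reproduces the new `BlockExecutionWeight` directly; `WEIGHT_REF_TIME_PER_NANOS` is the `sp_weights::constants` value of 1 000 ref-time units per nanosecond:

```rust
// Reproduces the regenerated BlockExecutionWeight from the measured average.
const WEIGHT_REF_TIME_PER_NANOS: u64 = 1_000; // sp_weights::constants

fn main() {
	let average_ns: u64 = 450_240; // new Average from the stats above
	let ref_time = WEIGHT_REF_TIME_PER_NANOS * average_ns;
	assert_eq!(ref_time, 450_240_000);

	// The previous constant was derived from 417_412 ns, so the regenerated
	// value is roughly 8% higher on the new reference hardware.
	let old_ref_time = WEIGHT_REF_TIME_PER_NANOS * 417_412;
	assert!(ref_time > old_ref_time);
}
```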
/// /// Stats nanoseconds: - /// Min, Max: 97_574, 100_119 - /// Average: 98_236 - /// Median: 98_179 - /// Std-Dev: 394.9 + /// Min, Max: 92_961, 94_143 + /// Average: 93_369 + /// Median: 93_331 + /// Std-Dev: 217.39 /// /// Percentiles nanoseconds: - /// 99th: 99_893 - /// 95th: 98_850 - /// 75th: 98_318 + /// 99th: 93_848 + /// 95th: 93_691 + /// 75th: 93_514 pub const ExtrinsicBaseWeight: Weight = - Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(98_236), 0); + Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(93_369), 0); } #[cfg(test)] diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index bbc79a13d37f4987eb7bfc4f1619b8141255467c..ff54c7776701c1ccaa512518e0470655fcd0afde 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -149,7 +149,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("rococo"), impl_name: create_runtime_str!("parity-rococo-v2.0"), authoring_version: 0, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 24, @@ -199,6 +199,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = SS58Prefix; type MaxConsumers = frame_support::traits::ConstU32<16>; } @@ -329,6 +330,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -591,7 +593,7 @@ where // so the actual block number is `n`. .saturating_sub(1); let tip = 0; - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -603,16 +605,17 @@ where frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), - ); - let raw_payload = SignedPayload::new(call, extra) + ) + .into(); + let raw_payload = SignedPayload::new(call, tx_ext) .map_err(|e| { log::warn!("Unable to create signed payload: {:?}", e); }) .ok()?; let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?; - let (call, extra, _) = raw_payload.deconstruct(); + let (call, tx_ext, _) = raw_payload.deconstruct(); let address = ::Lookup::unlookup(account); - Some((call, (address, signature, extra))) + Some((call, (address, signature, tx_ext))) } } @@ -633,12 +636,32 @@ parameter_types! 
{ pub Prefix: &'static [u8] = b"Pay ROCs to the Rococo account:"; } +#[cfg(feature = "runtime-benchmarks")] +pub struct ClaimsHelper; + +#[cfg(feature = "runtime-benchmarks")] +use frame_support::dispatch::DispatchInfo; + +#[cfg(feature = "runtime-benchmarks")] +impl claims::BenchmarkHelperTrait for ClaimsHelper { + fn default_call_and_info() -> (RuntimeCall, DispatchInfo) { + use frame_support::dispatch::GetDispatchInfo; + let call = RuntimeCall::Claims(claims::Call::attest { + statement: claims::StatementKind::Regular.to_text().to_vec(), + }); + let info = call.get_dispatch_info(); + (call, info) + } +} + impl claims::Config for Runtime { type RuntimeEvent = RuntimeEvent; type VestingSchedule = Vesting; type Prefix = Prefix; type MoveClaimOrigin = EnsureRoot; type WeightInfo = weights::runtime_common_claims::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = ClaimsHelper; } parameter_types! { @@ -1447,8 +1470,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// `BlockId` type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The `SignedExtension` to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -1461,7 +1484,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// All migrations that will run on the next runtime upgrade. /// @@ -1658,6 +1681,7 @@ pub mod migrations { parachains_configuration::migration::v11::MigrateToV11, // This needs to come after the `parachains_configuration` above as we are reading the configuration. coretime::migration::MigrateToCoretime, + parachains_configuration::migration::v12::MigrateToV12, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, @@ -1674,7 +1698,7 @@ pub type Executive = frame_executive::Executive< Migrations, >; /// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; +pub type SignedPayload = generic::SignedPayload; parameter_types! { // The deposit configuration for the singed migration. Specially if you want to allow any signed account to do the migration (see `SignedFilter`, these deposits should be high) @@ -1745,7 +1769,9 @@ mod benches { [pallet_scheduler, Scheduler] [pallet_sudo, Sudo] [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_treasury, Treasury] [pallet_utility, Utility] [pallet_vesting, Vesting] @@ -1768,7 +1794,7 @@ sp_api::impl_runtime_apis! { Executive::execute_block(block); } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -2239,6 +2265,7 @@ sp_api::impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use frame_benchmarking::baseline::Pallet as Baseline; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; @@ -2259,6 +2286,7 @@ sp_api::impl_runtime_apis! 
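The rendering of this diff drops angle-bracketed generic parameters, which makes the `SignedExtra` → `TxExtension` wiring in `lib.rs` easy to misread. The key lines are reconstructed below with the type parameters restored as assumed from the surrounding runtime code (a sketch, not compilable on its own; the middle members of the tuple are elided here exactly as they are in the rendered hunk):

```rust
// The `SignedExtra` tuple is renamed to `TxExtension`; its members are unchanged.
pub type TxExtension = (
	frame_system::CheckNonZeroSender<Runtime>,
	frame_system::CheckSpecVersion<Runtime>,
	frame_system::CheckTxVersion<Runtime>,
	// ...remaining members as in the original alias...
	frame_system::CheckNonce<Runtime>,
	frame_system::CheckWeight<Runtime>,
	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
);

// Threaded through the extrinsic and payload types (generic parameters assumed):
pub type UncheckedExtrinsic =
	generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, TxExtension>;
pub type SignedPayload = generic::SignedPayload<RuntimeCall, TxExtension>;

// In `create_transaction`, the tuple is now bound as `tx_ext` and converted
// with `.into()` before building the payload, per the hunk above:
//   let raw_payload = SignedPayload::new(call, tx_ext) ...;
//   let (call, tx_ext, _) = raw_payload.deconstruct();
//   Some((call, (address, signature, tx_ext)))
```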
{ use frame_support::traits::WhitelistedStorageKeys; use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use frame_benchmarking::baseline::Pallet as Baseline; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use sp_storage::TrackedStorageKey; @@ -2335,6 +2363,13 @@ sp_api::impl_runtime_apis! { dest ) } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::here()), + fun: Fungible(ExistentialDeposit::get()), + } + } } impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = XcmConfig; diff --git a/polkadot/runtime/rococo/src/weights/frame_benchmarking_baseline.rs b/polkadot/runtime/rococo/src/weights/frame_benchmarking_baseline.rs index dfba0cfc4aa90938c70376796117eea9ff00b676..0f68a5c6fb373b5d7b871aba540aa7210d710a53 100644 --- a/polkadot/runtime/rococo/src/weights/frame_benchmarking_baseline.rs +++ b/polkadot/runtime/rococo/src/weights/frame_benchmarking_baseline.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `frame_benchmarking::baseline` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=frame_benchmarking::baseline // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/frame_benchmarking_baseline.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/frame_benchmarking_baseline.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -52,8 +55,8 @@ impl frame_benchmarking::baseline::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 157_000 picoseconds. - Weight::from_parts(175_233, 0) + // Minimum execution time: 172_000 picoseconds. + Weight::from_parts(199_481, 0) .saturating_add(Weight::from_parts(0, 0)) } /// The range of component `i` is `[0, 1000000]`. @@ -61,8 +64,8 @@ impl frame_benchmarking::baseline::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 149_000 picoseconds. - Weight::from_parts(183_285, 0) + // Minimum execution time: 171_000 picoseconds. + Weight::from_parts(197_821, 0) .saturating_add(Weight::from_parts(0, 0)) } /// The range of component `i` is `[0, 1000000]`. @@ -70,8 +73,8 @@ impl frame_benchmarking::baseline::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 158_000 picoseconds. - Weight::from_parts(184_720, 0) + // Minimum execution time: 172_000 picoseconds. 
+ Weight::from_parts(200_942, 0) .saturating_add(Weight::from_parts(0, 0)) } /// The range of component `i` is `[0, 1000000]`. @@ -79,16 +82,16 @@ impl frame_benchmarking::baseline::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 152_000 picoseconds. - Weight::from_parts(177_496, 0) + // Minimum execution time: 170_000 picoseconds. + Weight::from_parts(196_906, 0) .saturating_add(Weight::from_parts(0, 0)) } fn hashing() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 19_907_376_000 picoseconds. - Weight::from_parts(19_988_727_000, 0) + // Minimum execution time: 23_346_876_000 picoseconds. + Weight::from_parts(23_363_744_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// The range of component `i` is `[0, 100]`. @@ -96,10 +99,10 @@ impl frame_benchmarking::baseline::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 198_000 picoseconds. - Weight::from_parts(228_000, 0) + // Minimum execution time: 201_000 picoseconds. + Weight::from_parts(219_000, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 20_467 - .saturating_add(Weight::from_parts(47_443_635, 0).saturating_mul(i.into())) + // Standard Error: 14_372 + .saturating_add(Weight::from_parts(45_375_800, 0).saturating_mul(i.into())) } } diff --git a/polkadot/runtime/rococo/src/weights/frame_system.rs b/polkadot/runtime/rococo/src/weights/frame_system.rs index 2e49483dcc62728f3554bb1364efd740f8b03fd2..1742a761ca77baa50c79f51cb4ac854cba0fa274 100644 --- a/polkadot/runtime/rococo/src/weights/frame_system.rs +++ b/polkadot/runtime/rococo/src/weights/frame_system.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `frame_system` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=frame_system // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -52,91 +55,91 @@ impl frame_system::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_283_000 picoseconds. - Weight::from_parts(2_305_000, 0) + // Minimum execution time: 1_541_000 picoseconds. 
+ Weight::from_parts(2_581_470, 0) .saturating_add(Weight::from_parts(0, 0)) // Standard Error: 0 - .saturating_add(Weight::from_parts(366, 0).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(387, 0).saturating_mul(b.into())) } /// The range of component `b` is `[0, 3932160]`. fn remark_with_event(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_435_000 picoseconds. - Weight::from_parts(7_581_000, 0) + // Minimum execution time: 5_060_000 picoseconds. + Weight::from_parts(5_167_000, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_408, 0).saturating_mul(b.into())) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_696, 0).saturating_mul(b.into())) } - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a686561707061676573` (r:0 w:1) - /// Proof Skipped: unknown `0x3a686561707061676573` (r:0 w:1) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) fn set_heap_pages() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 4_010_000 picoseconds. - Weight::from_parts(4_112_000, 0) + // Minimum execution time: 2_649_000 picoseconds. + Weight::from_parts(2_909_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a636f6465` (r:0 w:1) - /// Proof Skipped: unknown `0x3a636f6465` (r:0 w:1) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) fn set_code() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 80_405_511_000 picoseconds. - Weight::from_parts(83_066_478_000, 0) + // Minimum execution time: 88_417_540_000 picoseconds. + Weight::from_parts(91_809_291_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1000]`. fn set_storage(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_210_000 picoseconds. - Weight::from_parts(2_247_000, 0) + // Minimum execution time: 1_538_000 picoseconds. 
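The `UNKNOWN KEY` entries in the regenerated storage comments are not mystery storage: they are the two well-known unhashed keys touched by `set_heap_pages` and `set_code`. A quick check that the hex in the comments decodes to `:heappages` and `:code`:

```rust
fn hex(bytes: &[u8]) -> String {
	bytes.iter().map(|b| format!("{b:02x}")).collect()
}

fn main() {
	// Matches `0x3a686561707061676573` from the `set_heap_pages` comment above.
	assert_eq!(hex(b":heappages"), "3a686561707061676573");
	// Matches `0x3a636f6465` from the `set_code` comment above.
	assert_eq!(hex(b":code"), "3a636f6465");
}
```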
+ Weight::from_parts(1_589_000, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_058 - .saturating_add(Weight::from_parts(673_943, 0).saturating_mul(i.into())) + // Standard Error: 1_740 + .saturating_add(Weight::from_parts(730_941, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1000]`. fn kill_storage(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_125_000 picoseconds. - Weight::from_parts(2_154_000, 0) + // Minimum execution time: 1_567_000 picoseconds. + Weight::from_parts(1_750_000, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 816 - .saturating_add(Weight::from_parts(491_194, 0).saturating_mul(i.into())) + // Standard Error: 835 + .saturating_add(Weight::from_parts(543_218, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `p` is `[0, 1000]`. fn kill_prefix(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `129 + p * (69 ±0)` - // Estimated: `125 + p * (70 ±0)` - // Minimum execution time: 4_002_000 picoseconds. - Weight::from_parts(4_145_000, 0) - .saturating_add(Weight::from_parts(0, 125)) - // Standard Error: 1_108 - .saturating_add(Weight::from_parts(1_014_971, 0).saturating_mul(p.into())) + // Measured: `80 + p * (69 ±0)` + // Estimated: `83 + p * (70 ±0)` + // Minimum execution time: 3_412_000 picoseconds. + Weight::from_parts(3_448_000, 0) + .saturating_add(Weight::from_parts(0, 83)) + // Standard Error: 1_395 + .saturating_add(Weight::from_parts(1_142_347, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) @@ -147,8 +150,8 @@ impl frame_system::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 33_027_000 picoseconds. - Weight::from_parts(33_027_000, 0) + // Minimum execution time: 9_178_000 picoseconds. + Weight::from_parts(9_780_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -162,8 +165,8 @@ impl frame_system::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `22` // Estimated: `1518` - // Minimum execution time: 118_101_992_000 picoseconds. - Weight::from_parts(118_101_992_000, 0) + // Minimum execution time: 94_523_563_000 picoseconds. 
+ Weight::from_parts(96_983_131_000, 0) .saturating_add(Weight::from_parts(0, 1518)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) diff --git a/polkadot/runtime/rococo/src/weights/frame_system_extensions.rs b/polkadot/runtime/rococo/src/weights/frame_system_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..40520591f533f8722b0d3cd9f68847288c28eb38 --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/frame_system_extensions.rs @@ -0,0 +1,123 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=rococo-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=frame_system_extensions +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_262_000 picoseconds. + Weight::from_parts(3_497_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_416_000 picoseconds. + Weight::from_parts(5_690_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 471_000 picoseconds. 
+ Weight::from_parts(552_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `3593` + // Minimum execution time: 4_847_000 picoseconds. + Weight::from_parts(5_091_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 388_000 picoseconds. + Weight::from_parts(421_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 378_000 picoseconds. + Weight::from_parts(440_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1489` + // Minimum execution time: 3_402_000 picoseconds. + Weight::from_parts(3_627_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/mod.rs b/polkadot/runtime/rococo/src/weights/mod.rs index 7328dca9393693e335b49dc317d6754f9fd6e840..128a3083a9c7139854fd2c41d75f82422617f1e4 100644 --- a/polkadot/runtime/rococo/src/weights/mod.rs +++ b/polkadot/runtime/rococo/src/weights/mod.rs @@ -16,6 +16,7 @@ //! A list of the different weight modules for our runtime. pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_asset_rate; pub mod pallet_balances_balances; pub mod pallet_balances_nis_counterpart_balances; @@ -36,6 +37,7 @@ pub mod pallet_scheduler; pub mod pallet_session; pub mod pallet_sudo; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_treasury; pub mod pallet_utility; pub mod pallet_vesting; diff --git a/polkadot/runtime/rococo/src/weights/pallet_asset_rate.rs b/polkadot/runtime/rococo/src/weights/pallet_asset_rate.rs index da2d1958cefcfeb41bcdba7ed2dd9c6b4272e873..56b1e2cbc5717fa932d9a5419773ce5fbd246d7f 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_asset_rate.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_asset_rate.rs @@ -16,25 +16,28 @@ //! Autogenerated weights for `pallet_asset_rate` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-03, STEPS: `50`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `cob`, CPU: `` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
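The new `frame_system_extensions` weight file feeds the `ExtensionsWeightInfo` associated type added to `frame_system::Config` earlier in this diff, and the file is regenerated by the `[frame_system_extensions, SystemExtensionsBench]` entry added to the benchmark list. Summing its `Weight::from_parts` ref_time values (DB read/write charges excluded) gives a rough feel for the per-signed-extrinsic check overhead that is now accounted for explicitly; treat this as an illustrative ballpark, not a guarantee of what any particular extrinsic is charged:

```rust
// Sum of the ref_time values in the new frame_system_extensions weight file.
fn main() {
	let per_extension_ps: [(&str, u64); 7] = [
		("check_genesis", 3_497_000),
		("check_mortality", 5_690_000),
		("check_non_zero_sender", 552_000),
		("check_nonce", 5_091_000),
		("check_spec_version", 421_000),
		("check_tx_version", 440_000),
		("check_weight", 3_627_000),
	];
	let total: u64 = per_extension_ps.iter().map(|(_, w)| w).sum();
	assert_eq!(total, 19_318_000); // ~19.3 µs of ref_time across the system extensions
	println!("system extensions total: {total} ps");
}
```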
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/debug/polkadot +// ./target/production/polkadot // benchmark // pallet // --chain=rococo-dev // --steps=50 -// --repeat=2 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_asset_rate // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./runtime/rococo/src/weights/ -// --header=./file_header.txt +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,39 +50,39 @@ use core::marker::PhantomData; /// Weight functions for `pallet_asset_rate`. pub struct WeightInfo(PhantomData); impl pallet_asset_rate::WeightInfo for WeightInfo { - /// Storage: AssetRate ConversionRateToNative (r:1 w:1) - /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen) + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(1238), added: 3713, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `4702` - // Minimum execution time: 143_000_000 picoseconds. - Weight::from_parts(155_000_000, 0) - .saturating_add(Weight::from_parts(0, 4702)) + // Measured: `142` + // Estimated: `4703` + // Minimum execution time: 10_277_000 picoseconds. + Weight::from_parts(10_487_000, 0) + .saturating_add(Weight::from_parts(0, 4703)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: AssetRate ConversionRateToNative (r:1 w:1) - /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen) + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(1238), added: 3713, mode: `MaxEncodedLen`) fn update() -> Weight { // Proof Size summary in bytes: - // Measured: `110` - // Estimated: `4702` - // Minimum execution time: 156_000_000 picoseconds. - Weight::from_parts(172_000_000, 0) - .saturating_add(Weight::from_parts(0, 4702)) + // Measured: `210` + // Estimated: `4703` + // Minimum execution time: 10_917_000 picoseconds. + Weight::from_parts(11_249_000, 0) + .saturating_add(Weight::from_parts(0, 4703)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: AssetRate ConversionRateToNative (r:1 w:1) - /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen) + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(1238), added: 3713, mode: `MaxEncodedLen`) fn remove() -> Weight { // Proof Size summary in bytes: - // Measured: `110` - // Estimated: `4702` - // Minimum execution time: 150_000_000 picoseconds. - Weight::from_parts(160_000_000, 0) - .saturating_add(Weight::from_parts(0, 4702)) + // Measured: `210` + // Estimated: `4703` + // Minimum execution time: 11_332_000 picoseconds. 
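The large drop in the `pallet_asset_rate` numbers (e.g. `create()` falling from roughly 143 µs to about 10 µs minimum execution time) lines up with the change in the recorded command: the old file was produced by `./target/debug/polkadot` with `--repeat=2` on an unspecified machine, while the new one comes from `./target/production/polkadot` with `--repeat=20` on the standard CI reference hardware. A quick ratio check on the values from the hunk above:

```rust
// Old debug-build measurement vs. new production-build one for
// pallet_asset_rate::create (values in picoseconds).
fn main() {
	let old_min_ps: f64 = 143_000_000.0; // debug binary, --repeat=2
	let new_min_ps: f64 = 10_277_000.0;  // production binary, --repeat=20
	let speedup = old_min_ps / new_min_ps;
	assert!(speedup > 13.0 && speedup < 15.0); // roughly 14x
	println!("create(): {speedup:.1}x lower minimum execution time");
}
```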
+ Weight::from_parts(11_866_000, 0) + .saturating_add(Weight::from_parts(0, 4703)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/pallet_balances_balances.rs b/polkadot/runtime/rococo/src/weights/pallet_balances_balances.rs index 1b0ae1eeece41b10aff0c65181fe757cfdc4dd0e..3fa54f2c36972b90b5304ef3ab3a962d9866ed27 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_balances_balances.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_balances_balances.rs @@ -16,24 +16,26 @@ //! Autogenerated weights for `pallet_balances` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-j8vvqcjr-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_balances // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_balances -// --chain=rococo-dev // --header=./polkadot/file_header.txt // --output=./polkadot/runtime/rococo/src/weights/ @@ -54,8 +56,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 44_127_000 picoseconds. - Weight::from_parts(45_099_000, 0) + // Minimum execution time: 45_336_000 picoseconds. + Weight::from_parts(46_189_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -66,8 +68,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 34_265_000 picoseconds. - Weight::from_parts(35_083_000, 0) + // Minimum execution time: 34_880_000 picoseconds. + Weight::from_parts(35_770_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -78,8 +80,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 12_189_000 picoseconds. - Weight::from_parts(12_655_000, 0) + // Minimum execution time: 12_904_000 picoseconds. + Weight::from_parts(13_260_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -90,8 +92,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 16_910_000 picoseconds. - Weight::from_parts(17_474_000, 0) + // Minimum execution time: 17_669_000 picoseconds. 
+ Weight::from_parts(18_228_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -102,8 +104,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `6196` - // Minimum execution time: 45_212_000 picoseconds. - Weight::from_parts(46_320_000, 0) + // Minimum execution time: 46_492_000 picoseconds. + Weight::from_parts(47_639_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -114,8 +116,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 42_500_000 picoseconds. - Weight::from_parts(43_991_000, 0) + // Minimum execution time: 44_342_000 picoseconds. + Weight::from_parts(45_144_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -126,8 +128,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 15_197_000 picoseconds. - Weight::from_parts(15_749_000, 0) + // Minimum execution time: 15_260_000 picoseconds. + Weight::from_parts(15_775_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -140,11 +142,11 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0 + u * (135 ±0)` // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 14_414_000 picoseconds. - Weight::from_parts(14_685_000, 0) + // Minimum execution time: 14_703_000 picoseconds. + Weight::from_parts(14_950_000, 0) .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 7_918 - .saturating_add(Weight::from_parts(13_095_420, 0).saturating_mul(u.into())) + // Standard Error: 7_665 + .saturating_add(Weight::from_parts(13_335_803, 0).saturating_mul(u.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) @@ -153,8 +155,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_239_000 picoseconds. - Weight::from_parts(5_617_000, 0) + // Minimum execution time: 5_506_000 picoseconds. + Weight::from_parts(5_753_000, 0) .saturating_add(Weight::from_parts(0, 0)) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_balances_nis_counterpart_balances.rs b/polkadot/runtime/rococo/src/weights/pallet_balances_nis_counterpart_balances.rs index 6cca9b9320a6558ae6f1a5192e615d62b4acf8f4..1852ba6c2c4da6c814ca520b4d73a6b4654f84f7 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_balances_nis_counterpart_balances.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_balances_nis_counterpart_balances.rs @@ -16,24 +16,26 @@ //! Autogenerated weights for `pallet_balances` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! 
DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-j8vvqcjr-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_balances // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_balances -// --chain=rococo-dev // --header=./polkadot/file_header.txt // --output=./polkadot/runtime/rococo/src/weights/ @@ -56,8 +58,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `6164` - // Minimum execution time: 41_978_000 picoseconds. - Weight::from_parts(42_989_000, 0) + // Minimum execution time: 42_443_000 picoseconds. + Weight::from_parts(43_250_000, 0) .saturating_add(Weight::from_parts(0, 6164)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -70,8 +72,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `6164` - // Minimum execution time: 32_250_000 picoseconds. - Weight::from_parts(33_074_000, 0) + // Minimum execution time: 32_417_000 picoseconds. + Weight::from_parts(33_247_000, 0) .saturating_add(Weight::from_parts(0, 6164)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -82,8 +84,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3577` - // Minimum execution time: 9_906_000 picoseconds. - Weight::from_parts(10_397_000, 0) + // Minimum execution time: 10_091_000 picoseconds. + Weight::from_parts(10_426_000, 0) .saturating_add(Weight::from_parts(0, 3577)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -96,8 +98,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `277` // Estimated: `3593` - // Minimum execution time: 16_298_000 picoseconds. - Weight::from_parts(17_115_000, 0) + // Minimum execution time: 16_546_000 picoseconds. + Weight::from_parts(17_259_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -110,8 +112,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `206` // Estimated: `6196` - // Minimum execution time: 43_283_000 picoseconds. - Weight::from_parts(44_033_000, 0) + // Minimum execution time: 44_322_000 picoseconds. + Weight::from_parts(45_319_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(4)) @@ -124,8 +126,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `6164` - // Minimum execution time: 40_564_000 picoseconds. - Weight::from_parts(41_597_000, 0) + // Minimum execution time: 40_852_000 picoseconds. 
+ Weight::from_parts(42_205_000, 0) .saturating_add(Weight::from_parts(0, 6164)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -138,8 +140,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `277` // Estimated: `3593` - // Minimum execution time: 15_018_000 picoseconds. - Weight::from_parts(15_532_000, 0) + // Minimum execution time: 15_050_000 picoseconds. + Weight::from_parts(15_813_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -154,11 +156,11 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0 + u * (256 ±0)` // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 14_470_000 picoseconds. - Weight::from_parts(14_828_000, 0) + // Minimum execution time: 14_830_000 picoseconds. + Weight::from_parts(15_061_000, 0) .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 15_515 - .saturating_add(Weight::from_parts(14_505_553, 0).saturating_mul(u.into())) + // Standard Error: 16_072 + .saturating_add(Weight::from_parts(14_981_430, 0).saturating_mul(u.into())) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(u.into()))) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(u.into()))) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) @@ -167,8 +169,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_277_000 picoseconds. - Weight::from_parts(5_628_000, 0) + // Minimum execution time: 5_344_000 picoseconds. + Weight::from_parts(5_735_000, 0) .saturating_add(Weight::from_parts(0, 0)) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_bounties.rs b/polkadot/runtime/rococo/src/weights/pallet_bounties.rs index 38d3645316f25faba74fff9d37c66db2ab4f08d9..8f8be5f2386f7983d42dbad355a266ff68cf332a 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_bounties.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_bounties.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_bounties` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_bounties // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,118 +50,181 @@ use core::marker::PhantomData; /// Weight functions for `pallet_bounties`. 
pub struct WeightInfo(PhantomData); impl pallet_bounties::WeightInfo for WeightInfo { - /// Storage: Bounties BountyCount (r:1 w:1) - /// Proof: Bounties BountyCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyDescriptions (r:0 w:1) - /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(16400), added: 18875, mode: MaxEncodedLen) - /// Storage: Bounties Bounties (r:0 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) + /// Storage: `Bounties::BountyCount` (r:1 w:1) + /// Proof: `Bounties::BountyCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:0 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) /// The range of component `d` is `[0, 16384]`. fn propose_bounty(d: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `210` // Estimated: `3593` - // Minimum execution time: 28_907_000 picoseconds. - Weight::from_parts(31_356_074, 0) + // Minimum execution time: 21_772_000 picoseconds. + Weight::from_parts(22_861_341, 0) .saturating_add(Weight::from_parts(0, 3593)) - // Standard Error: 18 - .saturating_add(Weight::from_parts(606, 0).saturating_mul(d.into())) + // Standard Error: 3 + .saturating_add(Weight::from_parts(721, 0).saturating_mul(d.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn approve_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `302` + // Estimated: `3642` + // Minimum execution time: 11_218_000 picoseconds. + Weight::from_parts(11_796_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `322` + // Estimated: `3642` + // Minimum execution time: 10_959_000 picoseconds. 
+ Weight::from_parts(11_658_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `498` + // Estimated: `3642` + // Minimum execution time: 37_419_000 picoseconds. + Weight::from_parts(38_362_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `494` + // Estimated: `3642` + // Minimum execution time: 27_328_000 picoseconds. + Weight::from_parts(27_661_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) fn award_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `400` + // Estimated: `3642` + // Minimum execution time: 16_067_000 picoseconds. + Weight::from_parts(16_865_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) fn claim_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. 
- Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `764` + // Estimated: `8799` + // Minimum execution time: 101_153_000 picoseconds. + Weight::from_parts(102_480_000, 0) + .saturating_add(Weight::from_parts(0, 8799)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:0) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyDescriptions (r:0 w:1) - /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(16400), added: 18875, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) fn close_bounty_proposed() -> Weight { // Proof Size summary in bytes: - // Measured: `482` + // Measured: `444` // Estimated: `3642` - // Minimum execution time: 46_020_000 picoseconds. - Weight::from_parts(46_711_000, 0) + // Minimum execution time: 38_838_000 picoseconds. + Weight::from_parts(39_549_000, 0) .saturating_add(Weight::from_parts(0, 3642)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) fn close_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `680` + // Estimated: `6196` + // Minimum execution time: 68_592_000 picoseconds. 
+ Weight::from_parts(70_727_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) fn extend_bounty_expiry() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `358` + // Estimated: `3642` + // Minimum execution time: 11_272_000 picoseconds. + Weight::from_parts(11_592_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Bounties BountyApprovals (r:1 w:1) - /// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:100 w:100) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:200 w:200) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 100]`. - fn spend_funds(_b: u32, ) -> Weight { + fn spend_funds(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1887` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(2_405_233, 0) + // Measured: `0 + b * (297 ±0)` + // Estimated: `1887 + b * (5206 ±0)` + // Minimum execution time: 2_844_000 picoseconds. + Weight::from_parts(2_900_000, 0) .saturating_add(Weight::from_parts(0, 1887)) + // Standard Error: 9_467 + .saturating_add(Weight::from_parts(32_326_595, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(b.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(b.into()))) + .saturating_add(Weight::from_parts(0, 5206).saturating_mul(b.into())) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_child_bounties.rs b/polkadot/runtime/rococo/src/weights/pallet_child_bounties.rs index e8c798d45e727618b892e4ebd7e11d7ba81f66ca..47ae3a5c90d1e8524b914bb3b4b5bbaeb71f3c25 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_child_bounties.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_child_bounties.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_child_bounties` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
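As a quick sanity check on the regenerated `pallet_bounties` weights just above, the plain-Rust sketch below hand-evaluates the new `spend_funds(b)` formula at its benchmarked maximum `b = 100`. The per-operation DB costs used here (25 µs per read, 100 µs per write) are assumed generic RocksDB defaults, not the rococo runtime's own benchmarked `DbWeight` constants, so the printed totals are indicative only.

```rust
// Hand-evaluation of the regenerated `spend_funds(b)` weight at b = 100.
// DB constants are assumed defaults (25 µs read / 100 µs write), not rococo's
// benchmarked values, so treat the output as an order-of-magnitude check.
fn main() {
    let b: u64 = 100; // `b` is bounded to [0, 100] in the benchmark above

    // Assumed per-operation DB costs in picoseconds.
    let read_ps: u64 = 25_000_000;
    let write_ps: u64 = 100_000_000;

    // ref_time: base + per-bounty slope + (1 + 3*b) reads + (1 + 3*b) writes.
    let ref_time = 2_900_000
        + 32_326_595 * b
        + read_ps * (1 + 3 * b)
        + write_ps * (1 + 3 * b);

    // proof_size: base 1887 bytes + 5206 bytes per approved bounty.
    let proof_size = 1_887 + 5_206 * b;

    // Roughly 41 ms of ref_time and ~510 KiB of proof under these assumptions.
    println!("spend_funds(100): ref_time = {ref_time} ps, proof_size = {proof_size} bytes");
}
```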
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_child_bounties // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,69 +50,153 @@ use core::marker::PhantomData; /// Weight functions for `pallet_child_bounties`. pub struct WeightInfo(PhantomData); impl pallet_child_bounties::WeightInfo for WeightInfo { + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyCount` (r:1 w:1) + /// Proof: `ChildBounties::ChildBountyCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:0 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) /// The range of component `d` is `[0, 16384]`. - fn add_child_bounty(_d: u32, ) -> Weight { + fn add_child_bounty(d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `540` + // Estimated: `6196` + // Minimum execution time: 57_964_000 picoseconds. + Weight::from_parts(59_559_565, 0) + .saturating_add(Weight::from_parts(0, 6196)) + // Standard Error: 11 + .saturating_add(Weight::from_parts(697, 0).saturating_mul(d.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(6)) } + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `594` + // Estimated: `3642` + // Minimum execution time: 17_527_000 picoseconds. 
+ Weight::from_parts(18_257_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `740` + // Estimated: `3642` + // Minimum execution time: 29_354_000 picoseconds. + Weight::from_parts(30_629_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `740` + // Estimated: `3642` + // Minimum execution time: 40_643_000 picoseconds. + Weight::from_parts(42_072_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) fn award_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `637` + // Estimated: `3642` + // Minimum execution time: 18_616_000 picoseconds. 
+ Weight::from_parts(19_316_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) fn claim_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `576` + // Estimated: `8799` + // Minimum execution time: 96_376_000 picoseconds. + Weight::from_parts(98_476_000, 0) + .saturating_add(Weight::from_parts(0, 8799)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(6)) } + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) fn close_child_bounty_added() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `840` + // Estimated: `6196` + // Minimum execution time: 64_640_000 picoseconds. 
+ Weight::from_parts(66_174_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(6)) } + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) fn close_child_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `1027` + // Estimated: `8799` + // Minimum execution time: 78_159_000 picoseconds. + Weight::from_parts(79_820_000, 0) + .saturating_add(Weight::from_parts(0, 8799)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(7)) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_conviction_voting.rs b/polkadot/runtime/rococo/src/weights/pallet_conviction_voting.rs index ba505737f1b070d21931c7e467b4853de1f119d2..5d92c158df44ee17d9e473cc222e4bae76597b67 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_conviction_voting.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_conviction_voting.rs @@ -16,17 +16,17 @@ //! Autogenerated weights for `pallet_conviction_voting` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
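One easy way to review these regenerated bodies is to confirm that the `(r:x w:y)` counts in the storage annotations sum to the generated `.reads()` / `.writes()` terms. The sketch below does that tally for `close_child_bounty_active` above; the access list is copied from its doc comments and the expected totals from its generated body. It is a reviewer aid only, not part of the generated file.

```rust
// Tally the (reads, writes) pairs from the `close_child_bounty_active` storage
// annotations above and check they match the generated reads(7)/writes(7) terms.
fn main() {
    let accesses: [(&str, u64, u64); 6] = [
        ("Bounties::Bounties", 1, 0),
        ("ChildBounties::ChildBounties", 1, 1),
        ("System::Account", 3, 3),
        ("ChildBounties::ChildrenCuratorFees", 1, 1),
        ("ChildBounties::ParentChildBounties", 1, 1),
        ("ChildBounties::ChildBountyDescriptions", 0, 1),
    ];

    let reads: u64 = accesses.iter().map(|(_, r, _)| r).sum();
    let writes: u64 = accesses.iter().map(|(_, _, w)| w).sum();

    assert_eq!((reads, writes), (7, 7));
    println!("close_child_bounty_active touches {reads} reads and {writes} writes");
}
```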
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot // benchmark // pallet -// --chain=kusama-dev +// --chain=rococo-dev // --steps=50 // --repeat=20 // --no-storage-info @@ -36,8 +36,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/kusama/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,144 +50,152 @@ use core::marker::PhantomData; /// Weight functions for `pallet_conviction_voting`. pub struct WeightInfo(PhantomData); impl pallet_conviction_voting::WeightInfo for WeightInfo { - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(311), added: 2786, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn vote_new() -> Weight { // Proof Size summary in bytes: - // Measured: `13445` + // Measured: `13407` // Estimated: `42428` - // Minimum execution time: 151_077_000 picoseconds. - Weight::from_parts(165_283_000, 0) + // Minimum execution time: 128_378_000 picoseconds. 
+ Weight::from_parts(131_028_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(311), added: 2786, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn vote_existing() -> Weight { // Proof Size summary in bytes: - // Measured: `14166` + // Measured: `14128` // Estimated: `83866` - // Minimum execution time: 232_420_000 picoseconds. - Weight::from_parts(244_439_000, 0) + // Minimum execution time: 155_379_000 picoseconds. 
+ Weight::from_parts(161_597_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().writes(7)) } - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn remove_vote() -> Weight { // Proof Size summary in bytes: // Measured: `13918` // Estimated: `83866` - // Minimum execution time: 205_017_000 picoseconds. - Weight::from_parts(216_594_000, 0) + // Minimum execution time: 130_885_000 picoseconds. + Weight::from_parts(138_080_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:0) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) fn remove_other_vote() -> Weight { // Proof Size summary in bytes: - // Measured: `13004` + // Measured: `13005` // Estimated: `30706` - // Minimum execution time: 84_226_000 picoseconds. - Weight::from_parts(91_255_000, 0) + // Minimum execution time: 71_743_000 picoseconds. 
+ Weight::from_parts(75_170_000, 0) .saturating_add(Weight::from_parts(0, 30706)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: ConvictionVoting VotingFor (r:2 w:2) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:512 w:512) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(311), added: 2786, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:2 w:2) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:512 w:512) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:50) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 512]`. fn delegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `29640 + r * (365 ±0)` + // Measured: `29602 + r * (365 ±0)` // Estimated: `83866 + r * (3411 ±0)` - // Minimum execution time: 78_708_000 picoseconds. - Weight::from_parts(2_053_488_615, 0) + // Minimum execution time: 58_504_000 picoseconds. 
+ Weight::from_parts(814_301_018, 0) .saturating_add(Weight::from_parts(0, 83866)) - // Standard Error: 179_271 - .saturating_add(Weight::from_parts(47_806_482, 0).saturating_mul(r.into())) + // Standard Error: 59_961 + .saturating_add(Weight::from_parts(20_002_833, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().writes(45)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 3411).saturating_mul(r.into())) } - /// Storage: ConvictionVoting VotingFor (r:2 w:2) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:512 w:512) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:2 w:2) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:512 w:512) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:50) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 512]`. fn undelegate(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `29555 + r * (365 ±0)` // Estimated: `83866 + r * (3411 ±0)` - // Minimum execution time: 45_232_000 picoseconds. - Weight::from_parts(2_045_021_014, 0) + // Minimum execution time: 34_970_000 picoseconds. 
+ Weight::from_parts(771_155_804, 0) .saturating_add(Weight::from_parts(0, 83866)) - // Standard Error: 185_130 - .saturating_add(Weight::from_parts(47_896_011, 0).saturating_mul(r.into())) + // Standard Error: 57_795 + .saturating_add(Weight::from_parts(19_781_645, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(43)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 3411).saturating_mul(r.into())) } - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(311), added: 2786, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) fn unlock() -> Weight { // Proof Size summary in bytes: - // Measured: `12218` + // Measured: `12180` // Estimated: `30706` - // Minimum execution time: 116_446_000 picoseconds. - Weight::from_parts(124_043_000, 0) + // Minimum execution time: 89_648_000 picoseconds. + Weight::from_parts(97_144_000, 0) .saturating_add(Weight::from_parts(0, 30706)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) diff --git a/polkadot/runtime/rococo/src/weights/pallet_identity.rs b/polkadot/runtime/rococo/src/weights/pallet_identity.rs index b334e21ea03127a749ff1bf2455f69627f832922..6df16351f2c28f96f807bb197f9059b51e19f703 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_identity.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_identity.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_identity` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
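The conviction-voting `delegate(r)` and `undelegate(r)` formulas above combine a base weight, a per-referendum slope, and per-referendum DB and proof-size terms. To make that two-dimensional `Weight` arithmetic easier to follow, here is a self-contained sketch that mirrors the regenerated `delegate(r)` body at the worst case `r = 512`. The local `Weight` type is a stand-in, not the real `sp_weights::Weight`, and the DB constants are again assumed defaults rather than rococo's benchmarked ones.

```rust
// Stand-in Weight type with the same two dimensions as the runtime's
// (ref_time in picoseconds, proof_size in bytes); not the real sp_weights API.
#[derive(Clone, Copy, Debug)]
struct Weight {
    ref_time: u64,
    proof_size: u64,
}

impl Weight {
    fn from_parts(ref_time: u64, proof_size: u64) -> Self {
        Self { ref_time, proof_size }
    }
    fn saturating_add(self, rhs: Self) -> Self {
        Self {
            ref_time: self.ref_time.saturating_add(rhs.ref_time),
            proof_size: self.proof_size.saturating_add(rhs.proof_size),
        }
    }
    fn saturating_mul(self, n: u64) -> Self {
        Self {
            ref_time: self.ref_time.saturating_mul(n),
            proof_size: self.proof_size.saturating_mul(n),
        }
    }
}

fn main() {
    // Assumed per-operation DB costs in picoseconds; rococo uses its own constants.
    let read = Weight::from_parts(25_000_000, 0);
    let write = Weight::from_parts(100_000_000, 0);
    let r: u64 = 512; // worst-case number of referenda, per the component range above

    // Mirrors the regenerated `delegate(r)` body: base parts, linear slope,
    // 7 + r reads, 45 + r writes, and 3411 extra proof bytes per referendum.
    let total = Weight::from_parts(814_301_018, 0)
        .saturating_add(Weight::from_parts(0, 83_866))
        .saturating_add(Weight::from_parts(20_002_833, 0).saturating_mul(r))
        .saturating_add(read.saturating_mul(7 + r))
        .saturating_add(write.saturating_mul(45 + r))
        .saturating_add(Weight::from_parts(0, 3_411).saturating_mul(r));

    println!("delegate(512) ≈ {total:?}");
}
```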
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_identity // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,290 +50,291 @@ use core::marker::PhantomData; /// Weight functions for `pallet_identity`. pub struct WeightInfo(PhantomData); impl pallet_identity::WeightInfo for WeightInfo { - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn add_registrar(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `32 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 12_290_000 picoseconds. - Weight::from_parts(12_664_362, 0) + // Minimum execution time: 7_673_000 picoseconds. + Weight::from_parts(8_351_866, 0) .saturating_add(Weight::from_parts(0, 2626)) - // Standard Error: 1_347 - .saturating_add(Weight::from_parts(88_179, 0).saturating_mul(r.into())) + // Standard Error: 1_302 + .saturating_add(Weight::from_parts(79_198, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. fn set_identity(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `442 + r * (5 ±0)` - // Estimated: `11003` - // Minimum execution time: 31_373_000 picoseconds. - Weight::from_parts(30_435_545, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 2_307 - .saturating_add(Weight::from_parts(92_753, 0).saturating_mul(r.into())) + // Measured: `6978 + r * (5 ±0)` + // Estimated: `11037` + // Minimum execution time: 111_646_000 picoseconds. 
+ Weight::from_parts(113_254_991, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 6_611 + .saturating_add(Weight::from_parts(162_119, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:100 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:100 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn set_subs_new(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `101` - // Estimated: `11003 + s * (2589 ±0)` - // Minimum execution time: 9_251_000 picoseconds. - Weight::from_parts(22_039_210, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 40_779 - .saturating_add(Weight::from_parts(2_898_525, 0).saturating_mul(s.into())) + // Estimated: `11037 + s * (2589 ±0)` + // Minimum execution time: 8_010_000 picoseconds. + Weight::from_parts(19_868_412, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 5_018 + .saturating_add(Weight::from_parts(3_115_007, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(s.into()))) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 2589).saturating_mul(s.into())) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 100]`. fn set_subs_old(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `194 + p * (32 ±0)` - // Estimated: `11003` - // Minimum execution time: 9_329_000 picoseconds. 
- Weight::from_parts(24_055_061, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 3_428 - .saturating_add(Weight::from_parts(1_130_604, 0).saturating_mul(p.into())) + // Estimated: `11037` + // Minimum execution time: 8_111_000 picoseconds. + Weight::from_parts(19_482_392, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 3_156 + .saturating_add(Weight::from_parts(1_305_890, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) } - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. - fn clear_identity(_r: u32, s: u32, ) -> Weight { + fn clear_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `469 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 53_365_000 picoseconds. - Weight::from_parts(35_391_422, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 1_353 - .saturating_add(Weight::from_parts(1_074_019, 0).saturating_mul(s.into())) + // Measured: `7070 + r * (5 ±0) + s * (32 ±0)` + // Estimated: `11037` + // Minimum execution time: 54_107_000 picoseconds. + Weight::from_parts(56_347_715, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 10_944 + .saturating_add(Weight::from_parts(191_321, 0).saturating_mul(r.into())) + // Standard Error: 2_135 + .saturating_add(Weight::from_parts(1_295_872, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) } - /// Storage: Identity Registrars (r:1 w:0) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:0) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. 
fn request_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `367 + r * (57 ±0) + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 32_509_000 picoseconds. - Weight::from_parts(31_745_585, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 2_214 - .saturating_add(Weight::from_parts(83_822, 0).saturating_mul(r.into())) - + // Measured: `6968 + r * (57 ±0)` + // Estimated: `11037` + // Minimum execution time: 75_780_000 picoseconds. + Weight::from_parts(76_869_773, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 5_456 + .saturating_add(Weight::from_parts(135_316, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. fn cancel_request(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `398 + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 29_609_000 picoseconds. - Weight::from_parts(28_572_602, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 2_528 - .saturating_add(Weight::from_parts(85_593, 0).saturating_mul(r.into())) + // Measured: `6999` + // Estimated: `11037` + // Minimum execution time: 75_769_000 picoseconds. + Weight::from_parts(76_805_143, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 3_598 + .saturating_add(Weight::from_parts(84_593, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn set_fee(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 7_793_000 picoseconds. - Weight::from_parts(8_173_888, 0) + // Minimum execution time: 5_357_000 picoseconds. + Weight::from_parts(5_732_132, 0) .saturating_add(Weight::from_parts(0, 2626)) - // Standard Error: 1_569 - .saturating_add(Weight::from_parts(72_367, 0).saturating_mul(r.into())) + // Standard Error: 927 + .saturating_add(Weight::from_parts(70_832, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn set_account_id(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 7_708_000 picoseconds. - Weight::from_parts(8_091_149, 0) + // Minimum execution time: 5_484_000 picoseconds. 
+ Weight::from_parts(5_892_704, 0) .saturating_add(Weight::from_parts(0, 2626)) - // Standard Error: 869 - .saturating_add(Weight::from_parts(87_993, 0).saturating_mul(r.into())) + // Standard Error: 947 + .saturating_add(Weight::from_parts(71_231, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn set_fields(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 7_601_000 picoseconds. - Weight::from_parts(8_038_414, 0) + // Minimum execution time: 5_310_000 picoseconds. + Weight::from_parts(5_766_651, 0) .saturating_add(Weight::from_parts(0, 2626)) - // Standard Error: 1_041 - .saturating_add(Weight::from_parts(82_588, 0).saturating_mul(r.into())) + // Standard Error: 916 + .saturating_add(Weight::from_parts(74_776, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity Registrars (r:1 w:0) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:0) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn provide_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `445 + r * (57 ±0) + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 23_114_000 picoseconds. - Weight::from_parts(22_076_548, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 2_881 - .saturating_add(Weight::from_parts(109_812, 0).saturating_mul(r.into())) + // Measured: `7046 + r * (57 ±0)` + // Estimated: `11037` + // Minimum execution time: 98_200_000 picoseconds. 
+ Weight::from_parts(100_105_482, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 6_152 + .saturating_add(Weight::from_parts(58_906, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. fn kill_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `676 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 70_007_000 picoseconds. - Weight::from_parts(50_186_495, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 6_533 - .saturating_add(Weight::from_parts(15_486, 0).saturating_mul(r.into())) - // Standard Error: 1_275 - .saturating_add(Weight::from_parts(1_085_117, 0).saturating_mul(s.into())) + // Measured: `7277 + r * (5 ±0) + s * (32 ±0)` + // Estimated: `11037` + // Minimum execution time: 64_647_000 picoseconds. 
+ Weight::from_parts(68_877_027, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 9_965 + .saturating_add(Weight::from_parts(135_044, 0).saturating_mul(r.into())) + // Standard Error: 1_944 + .saturating_add(Weight::from_parts(1_388_151, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 99]`. fn add_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `475 + s * (36 ±0)` - // Estimated: `11003` - // Minimum execution time: 28_453_000 picoseconds. - Weight::from_parts(33_165_934, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 1_217 - .saturating_add(Weight::from_parts(65_401, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 23_550_000 picoseconds. + Weight::from_parts(29_439_842, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 1_453 + .saturating_add(Weight::from_parts(96_324, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn rename_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `591 + s * (3 ±0)` - // Estimated: `11003` - // Minimum execution time: 12_846_000 picoseconds. - Weight::from_parts(14_710_284, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 496 - .saturating_add(Weight::from_parts(19_539, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 13_704_000 picoseconds. 
+ Weight::from_parts(15_241_441, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 498 + .saturating_add(Weight::from_parts(40_973, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn remove_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `638 + s * (35 ±0)` - // Estimated: `11003` - // Minimum execution time: 32_183_000 picoseconds. - Weight::from_parts(35_296_731, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 854 - .saturating_add(Weight::from_parts(52_028, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 29_310_000 picoseconds. + Weight::from_parts(31_712_666, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 967 + .saturating_add(Weight::from_parts(81_250, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 99]`. fn quit_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `704 + s * (37 ±0)` // Estimated: `6723` - // Minimum execution time: 24_941_000 picoseconds. - Weight::from_parts(27_433_059, 0) + // Minimum execution time: 22_906_000 picoseconds. 
+ Weight::from_parts(24_638_729, 0) .saturating_add(Weight::from_parts(0, 6723)) - // Standard Error: 856 - .saturating_add(Weight::from_parts(57_463, 0).saturating_mul(s.into())) + // Standard Error: 645 + .saturating_add(Weight::from_parts(75_121, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -340,90 +344,93 @@ impl pallet_identity::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 13_873_000 picoseconds. - Weight::from_parts(13_873_000, 0) + // Minimum execution time: 6_056_000 picoseconds. + Weight::from_parts(6_349_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `Identity::UsernameAuthorities` (r:0 w:1) + /// Storage: `Identity::UsernameAuthorities` (r:1 w:1) /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn remove_username_authority() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 10_653_000 picoseconds. - Weight::from_parts(10_653_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `80` + // Estimated: `3517` + // Minimum execution time: 9_003_000 picoseconds. + Weight::from_parts(9_276_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Identity::UsernameAuthorities` (r:1 w:1) /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `Identity::AccountOfUsername` (r:1 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Identity::PendingUsernames` (r:1 w:0) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) fn set_username_for() -> Weight { // Proof Size summary in bytes: // Measured: `80` // Estimated: `11037` - // Minimum execution time: 75_928_000 picoseconds. - Weight::from_parts(75_928_000, 0) + // Minimum execution time: 64_724_000 picoseconds. 
+ Weight::from_parts(66_597_000, 0) .saturating_add(Weight::from_parts(0, 11037)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Identity::PendingUsernames` (r:1 w:1) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `Identity::AccountOfUsername` (r:0 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) fn accept_username() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `115` // Estimated: `11037` - // Minimum execution time: 38_157_000 picoseconds. - Weight::from_parts(38_157_000, 0) + // Minimum execution time: 19_538_000 picoseconds. + Weight::from_parts(20_204_000, 0) .saturating_add(Weight::from_parts(0, 11037)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Identity::PendingUsernames` (r:1 w:1) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn remove_expired_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `3542` - // Minimum execution time: 46_821_000 picoseconds. - Weight::from_parts(46_821_000, 0) - .saturating_add(Weight::from_parts(0, 3542)) + // Measured: `115` + // Estimated: `3550` + // Minimum execution time: 16_000_000 picoseconds. + Weight::from_parts(19_354_000, 0) + .saturating_add(Weight::from_parts(0, 3550)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Identity::AccountOfUsername` (r:1 w:0) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) fn set_primary_username() -> Weight { // Proof Size summary in bytes: - // Measured: `247` + // Measured: `257` // Estimated: `11037` - // Minimum execution time: 22_515_000 picoseconds. - Weight::from_parts(22_515_000, 0) + // Minimum execution time: 15_298_000 picoseconds. 
+ Weight::from_parts(15_760_000, 0) .saturating_add(Weight::from_parts(0, 11037)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Identity::AccountOfUsername` (r:1 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:0) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) fn remove_dangling_username() -> Weight { // Proof Size summary in bytes: - // Measured: `126` + // Measured: `98` // Estimated: `11037` - // Minimum execution time: 15_997_000 picoseconds. - Weight::from_parts(15_997_000, 0) + // Minimum execution time: 10_829_000 picoseconds. + Weight::from_parts(11_113_000, 0) .saturating_add(Weight::from_parts(0, 11037)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/rococo/src/weights/pallet_indices.rs b/polkadot/runtime/rococo/src/weights/pallet_indices.rs index 99ffd3210ed24f04e170a66c57b01ea9232b0475..434db97d4a79faa0d51dcad13c16d6840f357483 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_indices.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_indices.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_indices` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_indices // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,66 +50,66 @@ use core::marker::PhantomData; /// Weight functions for `pallet_indices`. pub struct WeightInfo(PhantomData); impl pallet_indices::WeightInfo for WeightInfo { - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn claim() -> Weight { // Proof Size summary in bytes: - // Measured: `142` + // Measured: `4` // Estimated: `3534` - // Minimum execution time: 25_107_000 picoseconds. - Weight::from_parts(25_655_000, 0) + // Minimum execution time: 18_092_000 picoseconds. 
+ Weight::from_parts(18_533_000, 0) .saturating_add(Weight::from_parts(0, 3534)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `341` + // Measured: `203` // Estimated: `3593` - // Minimum execution time: 36_208_000 picoseconds. - Weight::from_parts(36_521_000, 0) + // Minimum execution time: 31_616_000 picoseconds. + Weight::from_parts(32_556_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn free() -> Weight { // Proof Size summary in bytes: - // Measured: `238` + // Measured: `100` // Estimated: `3534` - // Minimum execution time: 25_915_000 picoseconds. - Weight::from_parts(26_220_000, 0) + // Minimum execution time: 19_593_000 picoseconds. + Weight::from_parts(20_100_000, 0) .saturating_add(Weight::from_parts(0, 3534)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `341` + // Measured: `203` // Estimated: `3593` - // Minimum execution time: 28_232_000 picoseconds. - Weight::from_parts(28_845_000, 0) + // Minimum execution time: 21_429_000 picoseconds. + Weight::from_parts(22_146_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn freeze() -> Weight { // Proof Size summary in bytes: - // Measured: `238` + // Measured: `100` // Estimated: `3534` - // Minimum execution time: 27_282_000 picoseconds. - Weight::from_parts(27_754_000, 0) + // Minimum execution time: 20_425_000 picoseconds. 
+ Weight::from_parts(21_023_000, 0) .saturating_add(Weight::from_parts(0, 3534)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/rococo/src/weights/pallet_message_queue.rs b/polkadot/runtime/rococo/src/weights/pallet_message_queue.rs index e1e360d374a0646b4fed8a29f3509565309d89c0..6ebfcd060b642d03c6eebdf7918d032253041e28 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_message_queue.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_message_queue.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_message_queue` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_message_queue // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,150 +50,149 @@ use core::marker::PhantomData; /// Weight functions for `pallet_message_queue`. pub struct WeightInfo(PhantomData); impl pallet_message_queue::WeightInfo for WeightInfo { - /// Storage: MessageQueue ServiceHead (r:1 w:0) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(6), added: 501, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:0) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(6), added: 501, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) fn ready_ring_knit() -> Weight { // Proof Size summary in bytes: - // Measured: `248` + // Measured: `281` // Estimated: `6050` - // Minimum execution time: 12_106_000 picoseconds. - Weight::from_parts(12_387_000, 0) + // Minimum execution time: 12_830_000 picoseconds. 
+ Weight::from_parts(13_476_000, 0) .saturating_add(Weight::from_parts(0, 6050)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(6), added: 501, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(6), added: 501, mode: `MaxEncodedLen`) fn ready_ring_unknit() -> Weight { // Proof Size summary in bytes: - // Measured: `248` + // Measured: `281` // Estimated: `6050` - // Minimum execution time: 11_227_000 picoseconds. - Weight::from_parts(11_616_000, 0) + // Minimum execution time: 11_583_000 picoseconds. + Weight::from_parts(11_902_000, 0) .saturating_add(Weight::from_parts(0, 6050)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) fn service_queue_base() -> Weight { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3520` - // Minimum execution time: 5_052_000 picoseconds. - Weight::from_parts(5_216_000, 0) + // Minimum execution time: 3_801_000 picoseconds. + Weight::from_parts(3_943_000, 0) .saturating_add(Weight::from_parts(0, 3520)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(32818), added: 35293, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(32818), added: 35293, mode: `MaxEncodedLen`) fn service_page_base_completion() -> Weight { // Proof Size summary in bytes: // Measured: `115` // Estimated: `36283` - // Minimum execution time: 6_522_000 picoseconds. - Weight::from_parts(6_794_000, 0) + // Minimum execution time: 5_517_000 picoseconds. + Weight::from_parts(5_861_000, 0) .saturating_add(Weight::from_parts(0, 36283)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(32818), added: 35293, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(32818), added: 35293, mode: `MaxEncodedLen`) fn service_page_base_no_completion() -> Weight { // Proof Size summary in bytes: // Measured: `115` // Estimated: `36283` - // Minimum execution time: 6_918_000 picoseconds. - Weight::from_parts(7_083_000, 0) + // Minimum execution time: 5_870_000 picoseconds. 
+ Weight::from_parts(6_028_000, 0) .saturating_add(Weight::from_parts(0, 36283)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `MessageQueue::BookStateFor` (r:0 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(32818), added: 35293, mode: `MaxEncodedLen`) fn service_page_item() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 28_445_000 picoseconds. - Weight::from_parts(28_659_000, 0) + // Minimum execution time: 80_681_000 picoseconds. + Weight::from_parts(81_818_000, 0) .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(6), added: 501, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(6), added: 501, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) fn bump_service_head() -> Weight { // Proof Size summary in bytes: - // Measured: `149` + // Measured: `220` // Estimated: `3520` - // Minimum execution time: 7_224_000 picoseconds. - Weight::from_parts(7_441_000, 0) + // Minimum execution time: 8_641_000 picoseconds. 
+ Weight::from_parts(8_995_000, 0) .saturating_add(Weight::from_parts(0, 3520)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(32818), added: 35293, mode: MaxEncodedLen) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Proof Skipped: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Storage: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) - /// Proof Skipped: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(32818), added: 35293, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) fn reap_page() -> Weight { // Proof Size summary in bytes: - // Measured: `33232` + // Measured: `32945` // Estimated: `36283` - // Minimum execution time: 45_211_000 picoseconds. - Weight::from_parts(45_505_000, 0) + // Minimum execution time: 38_473_000 picoseconds. 
+ Weight::from_parts(39_831_000, 0) .saturating_add(Weight::from_parts(0, 36283)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(32818), added: 35293, mode: MaxEncodedLen) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Proof Skipped: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Storage: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) - /// Proof Skipped: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(32818), added: 35293, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) fn execute_overweight_page_removed() -> Weight { // Proof Size summary in bytes: - // Measured: `33232` + // Measured: `32945` // Estimated: `36283` - // Minimum execution time: 52_346_000 picoseconds. - Weight::from_parts(52_745_000, 0) + // Minimum execution time: 48_717_000 picoseconds. 
+ Weight::from_parts(49_724_000, 0) .saturating_add(Weight::from_parts(0, 36283)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(32818), added: 35293, mode: MaxEncodedLen) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Proof Skipped: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Storage: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) - /// Proof Skipped: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(32818), added: 35293, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) fn execute_overweight_page_updated() -> Weight { // Proof Size summary in bytes: - // Measured: `33232` + // Measured: `32945` // Estimated: `36283` - // Minimum execution time: 72_567_000 picoseconds. - Weight::from_parts(73_300_000, 0) + // Minimum execution time: 72_718_000 picoseconds. + Weight::from_parts(74_081_000, 0) .saturating_add(Weight::from_parts(0, 36283)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_multisig.rs b/polkadot/runtime/rococo/src/weights/pallet_multisig.rs index a4f33fe198ca167e9b8112a13b1549ba727da354..f1b81759ece6c4c360a4bfb61afafb76836d11b2 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_multisig.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_multisig.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_multisig` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_multisig // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -52,110 +55,110 @@ impl pallet_multisig::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_475_000 picoseconds. - Weight::from_parts(11_904_745, 0) + // Minimum execution time: 12_023_000 picoseconds. + Weight::from_parts(12_643_116, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(492, 0).saturating_mul(z.into())) + // Standard Error: 3 + .saturating_add(Weight::from_parts(582, 0).saturating_mul(z.into())) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_create(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `193 + s * (2 ±0)` + // Measured: `229 + s * (2 ±0)` // Estimated: `6811` - // Minimum execution time: 38_857_000 picoseconds. - Weight::from_parts(33_611_791, 0) + // Minimum execution time: 39_339_000 picoseconds. + Weight::from_parts(27_243_033, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 400 - .saturating_add(Weight::from_parts(59_263, 0).saturating_mul(s.into())) - // Standard Error: 3 - .saturating_add(Weight::from_parts(1_211, 0).saturating_mul(z.into())) + // Standard Error: 1_319 + .saturating_add(Weight::from_parts(142_212, 0).saturating_mul(s.into())) + // Standard Error: 12 + .saturating_add(Weight::from_parts(1_592, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[3, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_approve(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `211` + // Measured: `248` // Estimated: `6811` - // Minimum execution time: 25_715_000 picoseconds. - Weight::from_parts(20_607_294, 0) + // Minimum execution time: 27_647_000 picoseconds. 
+ Weight::from_parts(15_828_725, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 285 - .saturating_add(Weight::from_parts(58_225, 0).saturating_mul(s.into())) - // Standard Error: 2 - .saturating_add(Weight::from_parts(1_160, 0).saturating_mul(z.into())) + // Standard Error: 908 + .saturating_add(Weight::from_parts(130_880, 0).saturating_mul(s.into())) + // Standard Error: 8 + .saturating_add(Weight::from_parts(1_532, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_complete(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `317 + s * (33 ±0)` + // Measured: `354 + s * (33 ±0)` // Estimated: `6811` - // Minimum execution time: 43_751_000 picoseconds. - Weight::from_parts(37_398_513, 0) + // Minimum execution time: 46_971_000 picoseconds. + Weight::from_parts(32_150_393, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 426 - .saturating_add(Weight::from_parts(70_904, 0).saturating_mul(s.into())) - // Standard Error: 4 - .saturating_add(Weight::from_parts(1_235, 0).saturating_mul(z.into())) + // Standard Error: 1_129 + .saturating_add(Weight::from_parts(154_796, 0).saturating_mul(s.into())) + // Standard Error: 11 + .saturating_add(Weight::from_parts(1_603, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. fn approve_as_multi_create(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `193 + s * (2 ±0)` + // Measured: `229 + s * (2 ±0)` // Estimated: `6811` - // Minimum execution time: 31_278_000 picoseconds. - Weight::from_parts(32_075_573, 0) + // Minimum execution time: 24_947_000 picoseconds. 
+ Weight::from_parts(26_497_183, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 452 - .saturating_add(Weight::from_parts(62_018, 0).saturating_mul(s.into())) + // Standard Error: 1_615 + .saturating_add(Weight::from_parts(147_071, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. fn approve_as_multi_approve(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `211` + // Measured: `248` // Estimated: `6811` - // Minimum execution time: 18_178_000 picoseconds. - Weight::from_parts(18_649_867, 0) + // Minimum execution time: 13_897_000 picoseconds. + Weight::from_parts(14_828_339, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 293 - .saturating_add(Weight::from_parts(56_475, 0).saturating_mul(s.into())) + // Standard Error: 1_136 + .saturating_add(Weight::from_parts(133_925, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. fn cancel_as_multi(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `383 + s * (1 ±0)` + // Measured: `420 + s * (1 ±0)` // Estimated: `6811` - // Minimum execution time: 32_265_000 picoseconds. - Weight::from_parts(32_984_014, 0) + // Minimum execution time: 28_984_000 picoseconds. + Weight::from_parts(29_853_232, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 452 - .saturating_add(Weight::from_parts(59_934, 0).saturating_mul(s.into())) + // Standard Error: 650 + .saturating_add(Weight::from_parts(113_440, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/pallet_nis.rs b/polkadot/runtime/rococo/src/weights/pallet_nis.rs index 35dad482129e6c63e0aca0aceb544c66fba78ee7..38b41f3a8e241185e1ccb40332bab8e96a846d2b 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_nis.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_nis.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_nis` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_nis // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,202 +50,186 @@ use core::marker::PhantomData; /// Weight functions for `pallet_nis`. pub struct WeightInfo(PhantomData); impl pallet_nis::WeightInfo for WeightInfo { - /// Storage: Nis Queues (r:1 w:1) - /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(67), added: 2542, mode: MaxEncodedLen) - /// Storage: Nis QueueTotals (r:1 w:1) - /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) + /// Storage: `Nis::Queues` (r:1 w:1) + /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `Nis::QueueTotals` (r:1 w:1) + /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 999]`. fn place_bid(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6209 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 44_704_000 picoseconds. - Weight::from_parts(44_933_886, 0) + // Minimum execution time: 39_592_000 picoseconds. + Weight::from_parts(38_234_037, 0) .saturating_add(Weight::from_parts(0, 51487)) - // Standard Error: 712 - .saturating_add(Weight::from_parts(71_570, 0).saturating_mul(l.into())) + // Standard Error: 1_237 + .saturating_add(Weight::from_parts(88_816, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Nis Queues (r:1 w:1) - /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(67), added: 2542, mode: MaxEncodedLen) - /// Storage: Nis QueueTotals (r:1 w:1) - /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) + /// Storage: `Nis::Queues` (r:1 w:1) + /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `Nis::QueueTotals` (r:1 w:1) + /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) fn place_bid_max() -> Weight { // Proof Size summary in bytes: // Measured: `54211` // Estimated: `51487` - // Minimum execution time: 126_544_000 picoseconds. - Weight::from_parts(128_271_000, 0) + // Minimum execution time: 134_847_000 picoseconds. 
+ Weight::from_parts(139_510_000, 0) .saturating_add(Weight::from_parts(0, 51487)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Nis Queues (r:1 w:1) - /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(67), added: 2542, mode: MaxEncodedLen) - /// Storage: Nis QueueTotals (r:1 w:1) - /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) + /// Storage: `Nis::Queues` (r:1 w:1) + /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `Nis::QueueTotals` (r:1 w:1) + /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) /// The range of component `l` is `[1, 1000]`. fn retract_bid(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6209 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 47_640_000 picoseconds. - Weight::from_parts(42_214_261, 0) + // Minimum execution time: 43_330_000 picoseconds. + Weight::from_parts(35_097_881, 0) .saturating_add(Weight::from_parts(0, 51487)) - // Standard Error: 732 - .saturating_add(Weight::from_parts(87_277, 0).saturating_mul(l.into())) + // Standard Error: 1_119 + .saturating_add(Weight::from_parts(73_640, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Nis Summary (r:1 w:0) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:0) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Nis::Summary` (r:1 w:0) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn fund_deficit() -> Weight { // Proof Size summary in bytes: // Measured: `225` // Estimated: `3593` - // Minimum execution time: 38_031_000 picoseconds. - Weight::from_parts(38_441_000, 0) + // Minimum execution time: 29_989_000 picoseconds. 
+ Weight::from_parts(30_865_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(67), added: 2542, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: NisCounterpartBalances TotalIssuance (r:1 w:1) - /// Proof: NisCounterpartBalances TotalIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: NisCounterpartBalances Account (r:1 w:1) - /// Proof: NisCounterpartBalances Account (max_values: None, max_size: Some(112), added: 2587, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `NisCounterpartBalances::Account` (r:1 w:1) + /// Proof: `NisCounterpartBalances::Account` (`max_values`: None, `max_size`: Some(112), added: 2587, mode: `MaxEncodedLen`) fn communify() -> Weight { // Proof Size summary in bytes: - // Measured: `469` + // Measured: `387` // Estimated: `3593` - // Minimum execution time: 69_269_000 picoseconds. - Weight::from_parts(70_000_000, 0) + // Minimum execution time: 58_114_000 picoseconds. 
+ Weight::from_parts(59_540_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:0) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: NisCounterpartBalances Account (r:1 w:1) - /// Proof: NisCounterpartBalances Account (max_values: None, max_size: Some(112), added: 2587, mode: MaxEncodedLen) - /// Storage: NisCounterpartBalances TotalIssuance (r:1 w:1) - /// Proof: NisCounterpartBalances TotalIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(67), added: 2542, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `NisCounterpartBalances::Account` (r:1 w:1) + /// Proof: `NisCounterpartBalances::Account` (`max_values`: None, `max_size`: Some(112), added: 2587, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) fn privatize() -> Weight { // Proof Size summary in bytes: - // Measured: `659` + // Measured: `543` // Estimated: `3593` - // Minimum execution time: 85_763_000 picoseconds. - Weight::from_parts(86_707_000, 0) + // Minimum execution time: 75_780_000 picoseconds. 
+ Weight::from_parts(77_097_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:0) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(67), added: 2542, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) fn thaw_private() -> Weight { // Proof Size summary in bytes: // Measured: `387` // Estimated: `3593` - // Minimum execution time: 47_336_000 picoseconds. - Weight::from_parts(47_623_000, 0) + // Minimum execution time: 46_133_000 picoseconds. 
+ Weight::from_parts(47_250_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: NisCounterpartBalances Account (r:1 w:1) - /// Proof: NisCounterpartBalances Account (max_values: None, max_size: Some(112), added: 2587, mode: MaxEncodedLen) - /// Storage: NisCounterpartBalances TotalIssuance (r:1 w:1) - /// Proof: NisCounterpartBalances TotalIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:0) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `NisCounterpartBalances::Account` (r:1 w:1) + /// Proof: `NisCounterpartBalances::Account` (`max_values`: None, `max_size`: Some(112), added: 2587, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn thaw_communal() -> Weight { // Proof Size summary in bytes: - // Measured: `604` + // Measured: `488` // Estimated: `3593` - // Minimum execution time: 90_972_000 picoseconds. - Weight::from_parts(92_074_000, 0) + // Minimum execution time: 77_916_000 picoseconds. 
+ Weight::from_parts(79_427_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:0) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Nis QueueTotals (r:1 w:1) - /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Nis::QueueTotals` (r:1 w:1) + /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) fn process_queues() -> Weight { // Proof Size summary in bytes: // Measured: `6658` // Estimated: `7487` - // Minimum execution time: 21_469_000 picoseconds. - Weight::from_parts(21_983_000, 0) + // Minimum execution time: 22_992_000 picoseconds. + Weight::from_parts(24_112_000, 0) .saturating_add(Weight::from_parts(0, 7487)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Nis Queues (r:1 w:1) - /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) + /// Storage: `Nis::Queues` (r:1 w:1) + /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) fn process_queue() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `51487` - // Minimum execution time: 4_912_000 picoseconds. - Weight::from_parts(5_013_000, 0) + // Minimum execution time: 3_856_000 picoseconds. + Weight::from_parts(4_125_000, 0) .saturating_add(Weight::from_parts(0, 51487)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Nis Receipts (r:0 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:0 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) fn process_bid() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_048_000 picoseconds. - Weight::from_parts(7_278_000, 0) + // Minimum execution time: 4_344_000 picoseconds. + Weight::from_parts(4_545_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/pallet_preimage.rs b/polkadot/runtime/rococo/src/weights/pallet_preimage.rs index e051ebd5bbab84cf98d34853666926a0f281e5c1..7a2b77b84d80cad3067f41a63da845af86e16816 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_preimage.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_preimage.rs @@ -16,11 +16,11 @@ //! 
Autogenerated weights for `pallet_preimage` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_preimage // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,184 +50,219 @@ use core::marker::PhantomData; /// Weight functions for `pallet_preimage`. pub struct WeightInfo(PhantomData); impl pallet_preimage::WeightInfo for WeightInfo { - fn ensure_updated(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `193 + n * (91 ±0)` - // Estimated: `3593 + n * (2566 ±0)` - // Minimum execution time: 2_000_000 picoseconds. - Weight::from_parts(2_000_000, 3593) - // Standard Error: 13_720 - .saturating_add(Weight::from_parts(17_309_199, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2566).saturating_mul(n.into())) - } - - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `215` - // Estimated: `3556` - // Minimum execution time: 31_040_000 picoseconds. 
- Weight::from_parts(31_236_000, 0) - .saturating_add(Weight::from_parts(0, 3556)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_974, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `114` + // Estimated: `3568` + // Minimum execution time: 40_363_000 picoseconds. + Weight::from_parts(41_052_000, 0) + .saturating_add(Weight::from_parts(0, 3568)) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_298, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_requested_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `178` // Estimated: `3556` - // Minimum execution time: 18_025_000 picoseconds. - Weight::from_parts(18_264_000, 0) + // Minimum execution time: 14_570_000 picoseconds. + Weight::from_parts(14_890_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_974, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) + // Standard Error: 2 + .saturating_add(Weight::from_parts(2_364, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_no_deposit_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `178` // Estimated: `3556` - // Minimum execution time: 17_122_000 picoseconds. - Weight::from_parts(17_332_000, 0) + // Minimum execution time: 13_933_000 picoseconds. 
+ Weight::from_parts(14_290_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_968, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) + // Standard Error: 2 + .saturating_add(Weight::from_parts(2_349, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `361` - // Estimated: `3556` - // Minimum execution time: 38_218_000 picoseconds. - Weight::from_parts(39_841_000, 0) - .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `315` + // Estimated: `3568` + // Minimum execution time: 54_373_000 picoseconds. + Weight::from_parts(58_205_000, 0) + .saturating_add(Weight::from_parts(0, 3568)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `216` // Estimated: `3556` - // Minimum execution time: 23_217_000 picoseconds. - Weight::from_parts(24_246_000, 0) + // Minimum execution time: 24_267_000 picoseconds. 
+ Weight::from_parts(27_063_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `260` // Estimated: `3556` - // Minimum execution time: 21_032_000 picoseconds. - Weight::from_parts(21_844_000, 0) + // Minimum execution time: 25_569_000 picoseconds. + Weight::from_parts(27_895_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `216` // Estimated: `3556` - // Minimum execution time: 13_954_000 picoseconds. - Weight::from_parts(14_501_000, 0) + // Minimum execution time: 14_182_000 picoseconds. + Weight::from_parts(16_098_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_unnoted_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `114` // Estimated: `3556` - // Minimum execution time: 14_874_000 picoseconds. - Weight::from_parts(15_380_000, 0) + // Minimum execution time: 14_681_000 picoseconds. 
+ Weight::from_parts(15_549_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_requested_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `178` // Estimated: `3556` - // Minimum execution time: 10_199_000 picoseconds. - Weight::from_parts(10_493_000, 0) + // Minimum execution time: 9_577_000 picoseconds. + Weight::from_parts(10_146_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unrequest_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `216` // Estimated: `3556` - // Minimum execution time: 21_772_000 picoseconds. - Weight::from_parts(22_554_000, 0) + // Minimum execution time: 21_003_000 picoseconds. + Weight::from_parts(23_549_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn unrequest_unnoted_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `178` // Estimated: `3556` - // Minimum execution time: 10_115_000 picoseconds. - Weight::from_parts(10_452_000, 0) + // Minimum execution time: 9_507_000 picoseconds. 
+ Weight::from_parts(10_013_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn unrequest_multi_referenced_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `178` // Estimated: `3556` - // Minimum execution time: 10_031_000 picoseconds. - Weight::from_parts(10_310_000, 0) + // Minimum execution time: 9_293_000 picoseconds. + Weight::from_parts(10_055_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Preimage::StatusFor` (r:1023 w:1023) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1023 w:1023) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1023 w:1023) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:0 w:1023) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 1024]`. + fn ensure_updated(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + n * (227 ±0)` + // Estimated: `990 + n * (2603 ±0)` + // Minimum execution time: 48_846_000 picoseconds. + Weight::from_parts(49_378_000, 0) + .saturating_add(Weight::from_parts(0, 990)) + // Standard Error: 38_493 + .saturating_add(Weight::from_parts(47_418_285, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2603).saturating_mul(n.into())) + } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_proxy.rs b/polkadot/runtime/rococo/src/weights/pallet_proxy.rs index d9737a85c05ad2b55759c141539f34fc60d3c808..c92025930950e61c3d4d253b2ae33e153ef70ccb 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_proxy.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_proxy.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_proxy` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! 
HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_proxy // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,172 +50,176 @@ use core::marker::PhantomData; /// Weight functions for `pallet_proxy`. pub struct WeightInfo(PhantomData); impl pallet_proxy::WeightInfo for WeightInfo { - /// Storage: Proxy Proxies (r:1 w:0) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `227 + p * (37 ±0)` + // Measured: `89 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 15_956_000 picoseconds. - Weight::from_parts(16_300_358, 0) + // Minimum execution time: 11_267_000 picoseconds. + Weight::from_parts(11_798_007, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 652 - .saturating_add(Weight::from_parts(30_807, 0).saturating_mul(p.into())) + // Standard Error: 858 + .saturating_add(Weight::from_parts(43_735, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) } - /// Storage: Proxy Proxies (r:1 w:0) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. fn proxy_announced(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `554 + a * (68 ±0) + p * (37 ±0)` + // Measured: `416 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 37_584_000 picoseconds. - Weight::from_parts(37_858_207, 0) + // Minimum execution time: 32_791_000 picoseconds. 
+ Weight::from_parts(32_776_904, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_868 - .saturating_add(Weight::from_parts(148_967, 0).saturating_mul(a.into())) - // Standard Error: 1_930 - .saturating_add(Weight::from_parts(13_017, 0).saturating_mul(p.into())) + // Standard Error: 2_382 + .saturating_add(Weight::from_parts(143_857, 0).saturating_mul(a.into())) + // Standard Error: 2_461 + .saturating_add(Weight::from_parts(40_024, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. - fn remove_announcement(a: u32, _p: u32, ) -> Weight { + fn remove_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `469 + a * (68 ±0)` + // Measured: `331 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 24_642_000 picoseconds. - Weight::from_parts(25_526_588, 0) + // Minimum execution time: 21_831_000 picoseconds. + Weight::from_parts(22_479_938, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_138 - .saturating_add(Weight::from_parts(131_157, 0).saturating_mul(a.into())) + // Standard Error: 1_738 + .saturating_add(Weight::from_parts(146_532, 0).saturating_mul(a.into())) + // Standard Error: 1_796 + .saturating_add(Weight::from_parts(7_499, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. - fn reject_announcement(a: u32, _p: u32, ) -> Weight { + fn reject_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `469 + a * (68 ±0)` + // Measured: `331 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 24_377_000 picoseconds. - Weight::from_parts(25_464_033, 0) + // Minimum execution time: 21_776_000 picoseconds. 
+ Weight::from_parts(22_762_843, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_116 - .saturating_add(Weight::from_parts(130_722, 0).saturating_mul(a.into())) + // Standard Error: 1_402 + .saturating_add(Weight::from_parts(137_512, 0).saturating_mul(a.into())) + // Standard Error: 1_449 + .saturating_add(Weight::from_parts(3_645, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Proxy Proxies (r:1 w:0) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. fn announce(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `486 + a * (68 ±0) + p * (37 ±0)` + // Measured: `348 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 34_202_000 picoseconds. - Weight::from_parts(34_610_079, 0) + // Minimum execution time: 29_108_000 picoseconds. + Weight::from_parts(29_508_910, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_234 - .saturating_add(Weight::from_parts(134_197, 0).saturating_mul(a.into())) - // Standard Error: 1_275 - .saturating_add(Weight::from_parts(15_970, 0).saturating_mul(p.into())) + // Standard Error: 2_268 + .saturating_add(Weight::from_parts(144_770, 0).saturating_mul(a.into())) + // Standard Error: 2_343 + .saturating_add(Weight::from_parts(25_851, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn add_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `227 + p * (37 ±0)` + // Measured: `89 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 25_492_000 picoseconds. - Weight::from_parts(25_984_867, 0) + // Minimum execution time: 18_942_000 picoseconds. 
+ Weight::from_parts(19_518_812, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 893 - .saturating_add(Weight::from_parts(51_868, 0).saturating_mul(p.into())) + // Standard Error: 1_078 + .saturating_add(Weight::from_parts(46_147, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn remove_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `227 + p * (37 ±0)` + // Measured: `89 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 25_492_000 picoseconds. - Weight::from_parts(26_283_445, 0) + // Minimum execution time: 18_993_000 picoseconds. + Weight::from_parts(19_871_741, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_442 - .saturating_add(Weight::from_parts(53_504, 0).saturating_mul(p.into())) + // Standard Error: 1_883 + .saturating_add(Weight::from_parts(46_033, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn remove_proxies(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `227 + p * (37 ±0)` + // Measured: `89 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 22_083_000 picoseconds. - Weight::from_parts(22_688_835, 0) + // Minimum execution time: 17_849_000 picoseconds. + Weight::from_parts(18_776_170, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 994 - .saturating_add(Weight::from_parts(32_994, 0).saturating_mul(p.into())) + // Standard Error: 1_239 + .saturating_add(Weight::from_parts(27_960, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn create_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `239` + // Measured: `101` // Estimated: `4706` - // Minimum execution time: 27_042_000 picoseconds. - Weight::from_parts(27_624_587, 0) + // Minimum execution time: 20_049_000 picoseconds. 
+ Weight::from_parts(20_881_515, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 671 - .saturating_add(Weight::from_parts(5_888, 0).saturating_mul(p.into())) + // Standard Error: 952 + .saturating_add(Weight::from_parts(5_970, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 30]`. fn kill_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `264 + p * (37 ±0)` + // Measured: `126 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 23_396_000 picoseconds. - Weight::from_parts(24_003_080, 0) + // Minimum execution time: 18_528_000 picoseconds. + Weight::from_parts(19_384_189, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 684 - .saturating_add(Weight::from_parts(29_878, 0).saturating_mul(p.into())) + // Standard Error: 1_106 + .saturating_add(Weight::from_parts(35_698, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/pallet_ranked_collective.rs b/polkadot/runtime/rococo/src/weights/pallet_ranked_collective.rs index ce9d5fcc0c7131b227e205a470532e8042cc93ae..fa2decb16716656c9329050c6a211b30ccf24769 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_ranked_collective.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_ranked_collective.rs @@ -16,24 +16,26 @@ //! Autogenerated weights for `pallet_ranked_collective` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-grjcggob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_ranked_collective // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_ranked_collective -// --chain=rococo-dev // --header=./polkadot/file_header.txt // --output=./polkadot/runtime/rococo/src/weights/ @@ -60,8 +62,8 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `42` // Estimated: `3507` - // Minimum execution time: 13_480_000 picoseconds. - Weight::from_parts(13_786_000, 0) + // Minimum execution time: 13_428_000 picoseconds. 
+ Weight::from_parts(14_019_000, 0) .saturating_add(Weight::from_parts(0, 3507)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) @@ -79,11 +81,11 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `516 + r * (281 ±0)` // Estimated: `3519 + r * (2529 ±0)` - // Minimum execution time: 28_771_000 picoseconds. - Weight::from_parts(29_256_825, 0) + // Minimum execution time: 28_566_000 picoseconds. + Weight::from_parts(29_346_952, 0) .saturating_add(Weight::from_parts(0, 3519)) - // Standard Error: 21_594 - .saturating_add(Weight::from_parts(14_649_527, 0).saturating_mul(r.into())) + // Standard Error: 21_068 + .saturating_add(Weight::from_parts(14_471_237, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(6)) @@ -103,11 +105,11 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `214 + r * (17 ±0)` // Estimated: `3507` - // Minimum execution time: 16_117_000 picoseconds. - Weight::from_parts(16_978_453, 0) + // Minimum execution time: 16_161_000 picoseconds. + Weight::from_parts(16_981_334, 0) .saturating_add(Weight::from_parts(0, 3507)) - // Standard Error: 4_511 - .saturating_add(Weight::from_parts(324_261, 0).saturating_mul(r.into())) + // Standard Error: 4_596 + .saturating_add(Weight::from_parts(313_386, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -124,11 +126,11 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `532 + r * (72 ±0)` // Estimated: `3519` - // Minimum execution time: 28_995_000 picoseconds. - Weight::from_parts(31_343_215, 0) + // Minimum execution time: 28_406_000 picoseconds. + Weight::from_parts(31_178_557, 0) .saturating_add(Weight::from_parts(0, 3519)) - // Standard Error: 16_438 - .saturating_add(Weight::from_parts(637_462, 0).saturating_mul(r.into())) + // Standard Error: 17_737 + .saturating_add(Weight::from_parts(627_757, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(6)) } @@ -140,15 +142,17 @@ impl pallet_ranked_collective::WeightInfo for WeightInf /// Proof: `FellowshipCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn vote() -> Weight { // Proof Size summary in bytes: // Measured: `603` // Estimated: `83866` - // Minimum execution time: 38_820_000 picoseconds. - Weight::from_parts(40_240_000, 0) + // Minimum execution time: 41_164_000 picoseconds. 
+ Weight::from_parts(42_163_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) @@ -161,11 +165,11 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `400 + n * (50 ±0)` // Estimated: `4365 + n * (2540 ±0)` - // Minimum execution time: 12_972_000 picoseconds. - Weight::from_parts(15_829_333, 0) + // Minimum execution time: 13_183_000 picoseconds. + Weight::from_parts(15_604_064, 0) .saturating_add(Weight::from_parts(0, 4365)) - // Standard Error: 1_754 - .saturating_add(Weight::from_parts(1_116_520, 0).saturating_mul(n.into())) + // Standard Error: 2_018 + .saturating_add(Weight::from_parts(1_101_088, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -183,8 +187,8 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `337` // Estimated: `6048` - // Minimum execution time: 44_601_000 picoseconds. - Weight::from_parts(45_714_000, 0) + // Minimum execution time: 43_603_000 picoseconds. + Weight::from_parts(44_809_000, 0) .saturating_add(Weight::from_parts(0, 6048)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(10)) diff --git a/polkadot/runtime/rococo/src/weights/pallet_recovery.rs b/polkadot/runtime/rococo/src/weights/pallet_recovery.rs new file mode 100644 index 0000000000000000000000000000000000000000..ed79aa2b1f175d65d33efad4901d1800fb5cbc28 --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/pallet_recovery.rs @@ -0,0 +1,186 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `pallet_recovery` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// ./target/production/polkadot
+// benchmark
+// pallet
+// --chain=rococo-dev
+// --steps=50
+// --repeat=20
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
+// --pallet=pallet_recovery
+// --extrinsic=*
+// --execution=wasm
+// --wasm-execution=compiled
+// --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/rococo/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_recovery`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_recovery::WeightInfo for WeightInfo<T> {
+	/// Storage: `Recovery::Proxy` (r:1 w:0)
+	/// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`)
+	fn as_recovered() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `215`
+		//  Estimated: `3545`
+		// Minimum execution time: 7_899_000 picoseconds.
+		Weight::from_parts(8_205_000, 0)
+			.saturating_add(Weight::from_parts(0, 3545))
+			.saturating_add(T::DbWeight::get().reads(1))
+	}
+	/// Storage: `Recovery::Proxy` (r:0 w:1)
+	/// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`)
+	fn set_recovered() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 6_258_000 picoseconds.
+		Weight::from_parts(6_494_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: `Recovery::Recoverable` (r:1 w:1)
+	/// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`)
+	/// The range of component `n` is `[1, 9]`.
+	fn create_recovery(n: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `109`
+		//  Estimated: `3816`
+		// Minimum execution time: 19_369_000 picoseconds.
+		Weight::from_parts(20_185_132, 0)
+			.saturating_add(Weight::from_parts(0, 3816))
+			// Standard Error: 4_275
+			.saturating_add(Weight::from_parts(78_024, 0).saturating_mul(n.into()))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: `Recovery::Recoverable` (r:1 w:0)
+	/// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`)
+	/// Storage: `Recovery::ActiveRecoveries` (r:1 w:1)
+	/// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`)
+	fn initiate_recovery() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `206`
+		//  Estimated: `3854`
+		// Minimum execution time: 22_425_000 picoseconds.
+		Weight::from_parts(23_171_000, 0)
+			.saturating_add(Weight::from_parts(0, 3854))
+			.saturating_add(T::DbWeight::get().reads(2))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: `Recovery::Recoverable` (r:1 w:0)
+	/// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`)
+	/// Storage: `Recovery::ActiveRecoveries` (r:1 w:1)
+	/// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`)
+	/// The range of component `n` is `[1, 9]`.
+ fn vouch_recovery(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `294 + n * (64 ±0)` + // Estimated: `3854` + // Minimum execution time: 17_308_000 picoseconds. + Weight::from_parts(18_118_782, 0) + .saturating_add(Weight::from_parts(0, 3854)) + // Standard Error: 4_309 + .saturating_add(Weight::from_parts(126_278, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::Recoverable` (r:1 w:0) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:0) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// Storage: `Recovery::Proxy` (r:1 w:1) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 9]`. + fn claim_recovery(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `326 + n * (64 ±0)` + // Estimated: `3854` + // Minimum execution time: 20_755_000 picoseconds. + Weight::from_parts(21_821_713, 0) + .saturating_add(Weight::from_parts(0, 3854)) + // Standard Error: 4_550 + .saturating_add(Weight::from_parts(101_916, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:1) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 9]`. + fn close_recovery(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `447 + n * (32 ±0)` + // Estimated: `3854` + // Minimum execution time: 29_957_000 picoseconds. + Weight::from_parts(31_010_309, 0) + .saturating_add(Weight::from_parts(0, 3854)) + // Standard Error: 5_913 + .saturating_add(Weight::from_parts(110_070, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:0) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// Storage: `Recovery::Recoverable` (r:1 w:1) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 9]`. + fn remove_recovery(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `204 + n * (32 ±0)` + // Estimated: `3854` + // Minimum execution time: 24_430_000 picoseconds. + Weight::from_parts(24_462_856, 0) + .saturating_add(Weight::from_parts(0, 3854)) + // Standard Error: 13_646 + .saturating_add(Weight::from_parts(507_715, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::Proxy` (r:1 w:1) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + fn cancel_recovered() -> Weight { + // Proof Size summary in bytes: + // Measured: `215` + // Estimated: `3545` + // Minimum execution time: 9_686_000 picoseconds. 
+ Weight::from_parts(10_071_000, 0) + .saturating_add(Weight::from_parts(0, 3545)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/pallet_referenda_fellowship_referenda.rs b/polkadot/runtime/rococo/src/weights/pallet_referenda_fellowship_referenda.rs index 96f172230e13f5aed92b47338b2387f4db79fa27..6dfcea2b8327a853927c45aeef76036752650718 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_referenda_fellowship_referenda.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_referenda_fellowship_referenda.rs @@ -16,27 +16,28 @@ //! Autogenerated weights for `pallet_referenda` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: `Some(Wasm)`, WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_referenda // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json -// --pallet=pallet_referenda -// --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -59,10 +60,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) fn submit() -> Weight { // Proof Size summary in bytes: - // Measured: `327` + // Measured: `292` // Estimated: `42428` - // Minimum execution time: 29_909_000 picoseconds. - Weight::from_parts(30_645_000, 0) + // Minimum execution time: 24_053_000 picoseconds. + Weight::from_parts(25_121_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -71,15 +72,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `438` + // Measured: `403` // Estimated: `83866` - // Minimum execution time: 54_405_000 picoseconds. 
- Weight::from_parts(55_583_000, 0) + // Minimum execution time: 45_064_000 picoseconds. + Weight::from_parts(46_112_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) @@ -89,15 +92,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `2076` + // Measured: `2041` // Estimated: `42428` - // Minimum execution time: 110_477_000 picoseconds. - Weight::from_parts(119_187_000, 0) + // Minimum execution time: 94_146_000 picoseconds. + Weight::from_parts(98_587_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) @@ -107,15 +112,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `2117` + // Measured: `2082` // Estimated: `42428` - // Minimum execution time: 111_467_000 picoseconds. - Weight::from_parts(117_758_000, 0) + // Minimum execution time: 93_002_000 picoseconds. 
+ Weight::from_parts(96_924_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) @@ -125,15 +132,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `774` + // Measured: `739` // Estimated: `83866` - // Minimum execution time: 191_135_000 picoseconds. - Weight::from_parts(210_535_000, 0) + // Minimum execution time: 160_918_000 picoseconds. + Weight::from_parts(175_603_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) @@ -143,24 +152,26 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `639` + // Measured: `604` // Estimated: `83866` - // Minimum execution time: 67_168_000 picoseconds. - Weight::from_parts(68_895_000, 0) + // Minimum execution time: 55_253_000 picoseconds. + Weight::from_parts(56_488_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) fn refund_decision_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `351` + // Measured: `317` // Estimated: `4365` - // Minimum execution time: 31_298_000 picoseconds. - Weight::from_parts(32_570_000, 0) + // Minimum execution time: 24_497_000 picoseconds. 
+ Weight::from_parts(25_280_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -169,10 +180,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) fn refund_submission_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `201` + // Measured: `167` // Estimated: `4365` - // Minimum execution time: 15_674_000 picoseconds. - Weight::from_parts(16_190_000, 0) + // Minimum execution time: 11_374_000 picoseconds. + Weight::from_parts(11_817_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -181,15 +192,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn cancel() -> Weight { // Proof Size summary in bytes: - // Measured: `383` + // Measured: `348` // Estimated: `83866` - // Minimum execution time: 38_927_000 picoseconds. - Weight::from_parts(40_545_000, 0) + // Minimum execution time: 31_805_000 picoseconds. + Weight::from_parts(32_622_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) @@ -197,15 +210,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `FellowshipReferenda::MetadataOf` (r:1 w:0) /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn kill() -> Weight { // Proof Size summary in bytes: - // Measured: `484` + // Measured: `449` // Estimated: `83866` - // Minimum execution time: 80_209_000 picoseconds. - Weight::from_parts(82_084_000, 0) + // Minimum execution time: 62_364_000 picoseconds. 
+ Weight::from_parts(63_798_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:0) /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) @@ -213,10 +228,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) fn one_fewer_deciding_queue_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `140` // Estimated: `4277` - // Minimum execution time: 9_520_000 picoseconds. - Weight::from_parts(10_088_000, 0) + // Minimum execution time: 8_811_000 picoseconds. + Weight::from_parts(9_224_000, 0) .saturating_add(Weight::from_parts(0, 4277)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -231,10 +246,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn one_fewer_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `2376` + // Measured: `2341` // Estimated: `42428` - // Minimum execution time: 93_893_000 picoseconds. - Weight::from_parts(101_065_000, 0) + // Minimum execution time: 83_292_000 picoseconds. + Weight::from_parts(89_114_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -249,10 +264,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn one_fewer_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `2362` + // Measured: `2327` // Estimated: `42428` - // Minimum execution time: 98_811_000 picoseconds. - Weight::from_parts(103_590_000, 0) + // Minimum execution time: 84_648_000 picoseconds. + Weight::from_parts(89_332_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -263,10 +278,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) fn nudge_referendum_requeued_insertion() -> Weight { // Proof Size summary in bytes: - // Measured: `1841` + // Measured: `1807` // Estimated: `4365` - // Minimum execution time: 43_230_000 picoseconds. - Weight::from_parts(46_120_000, 0) + // Minimum execution time: 40_529_000 picoseconds. + Weight::from_parts(45_217_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -277,10 +292,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) fn nudge_referendum_requeued_slide() -> Weight { // Proof Size summary in bytes: - // Measured: `1808` + // Measured: `1774` // Estimated: `4365` - // Minimum execution time: 43_092_000 picoseconds. - Weight::from_parts(46_018_000, 0) + // Minimum execution time: 40_894_000 picoseconds. 
+ Weight::from_parts(45_726_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -293,10 +308,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) fn nudge_referendum_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `1824` + // Measured: `1790` // Estimated: `4365` - // Minimum execution time: 49_697_000 picoseconds. - Weight::from_parts(53_795_000, 0) + // Minimum execution time: 48_187_000 picoseconds. + Weight::from_parts(52_655_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -309,10 +324,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) fn nudge_referendum_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `1865` + // Measured: `1831` // Estimated: `4365` - // Minimum execution time: 50_417_000 picoseconds. - Weight::from_parts(53_214_000, 0) + // Minimum execution time: 47_548_000 picoseconds. + Weight::from_parts(51_547_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -323,10 +338,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_no_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `335` + // Measured: `300` // Estimated: `42428` - // Minimum execution time: 25_688_000 picoseconds. - Weight::from_parts(26_575_000, 0) + // Minimum execution time: 20_959_000 picoseconds. + Weight::from_parts(21_837_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -337,10 +352,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `383` + // Measured: `348` // Estimated: `42428` - // Minimum execution time: 26_230_000 picoseconds. - Weight::from_parts(27_235_000, 0) + // Minimum execution time: 21_628_000 picoseconds. + Weight::from_parts(22_192_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -349,10 +364,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) fn nudge_referendum_timed_out() -> Weight { // Proof Size summary in bytes: - // Measured: `242` + // Measured: `208` // Estimated: `4365` - // Minimum execution time: 17_585_000 picoseconds. - Weight::from_parts(18_225_000, 0) + // Minimum execution time: 12_309_000 picoseconds. 
+ Weight::from_parts(12_644_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -367,10 +382,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_begin_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `584` + // Measured: `549` // Estimated: `42428` - // Minimum execution time: 38_243_000 picoseconds. - Weight::from_parts(39_959_000, 0) + // Minimum execution time: 31_871_000 picoseconds. + Weight::from_parts(33_123_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -385,10 +400,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_begin_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `719` + // Measured: `684` // Estimated: `42428` - // Minimum execution time: 88_424_000 picoseconds. - Weight::from_parts(92_969_000, 0) + // Minimum execution time: 73_715_000 picoseconds. + Weight::from_parts(79_980_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -401,10 +416,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_begin_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `770` + // Measured: `735` // Estimated: `42428` - // Minimum execution time: 138_207_000 picoseconds. - Weight::from_parts(151_726_000, 0) + // Minimum execution time: 128_564_000 picoseconds. + Weight::from_parts(138_536_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -417,10 +432,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_end_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `755` + // Measured: `720` // Estimated: `42428` - // Minimum execution time: 131_001_000 picoseconds. - Weight::from_parts(148_651_000, 0) + // Minimum execution time: 129_775_000 picoseconds. + Weight::from_parts(139_001_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -433,10 +448,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_continue_not_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `770` + // Measured: `735` // Estimated: `42428` - // Minimum execution time: 109_612_000 picoseconds. - Weight::from_parts(143_626_000, 0) + // Minimum execution time: 128_233_000 picoseconds. 
+ Weight::from_parts(135_796_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -449,10 +464,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_continue_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `776` + // Measured: `741` // Estimated: `42428` - // Minimum execution time: 71_754_000 picoseconds. - Weight::from_parts(77_329_000, 0) + // Minimum execution time: 66_995_000 picoseconds. + Weight::from_parts(72_678_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -467,10 +482,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn nudge_referendum_approved() -> Weight { // Proof Size summary in bytes: - // Measured: `776` + // Measured: `741` // Estimated: `83866` - // Minimum execution time: 153_244_000 picoseconds. - Weight::from_parts(169_961_000, 0) + // Minimum execution time: 137_764_000 picoseconds. + Weight::from_parts(152_260_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) @@ -483,10 +498,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_rejected() -> Weight { // Proof Size summary in bytes: - // Measured: `772` + // Measured: `737` // Estimated: `42428` - // Minimum execution time: 137_997_000 picoseconds. - Weight::from_parts(157_862_000, 0) + // Minimum execution time: 119_992_000 picoseconds. + Weight::from_parts(134_805_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -495,16 +510,18 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:0) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `FellowshipReferenda::MetadataOf` (r:0 w:1) /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn set_some_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `458` + // Measured: `424` // Estimated: `4365` - // Minimum execution time: 21_794_000 picoseconds. - Weight::from_parts(22_341_000, 0) + // Minimum execution time: 20_927_000 picoseconds. 
+ Weight::from_parts(21_802_000, 0) .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) @@ -513,10 +530,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `319` + // Measured: `285` // Estimated: `4365` - // Minimum execution time: 18_458_000 picoseconds. - Weight::from_parts(19_097_000, 0) + // Minimum execution time: 14_253_000 picoseconds. + Weight::from_parts(15_031_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/rococo/src/weights/pallet_referenda_referenda.rs b/polkadot/runtime/rococo/src/weights/pallet_referenda_referenda.rs index b7cc5df28b91cc2cc36cade14784b41d7d34ff71..c35925198f9d0d3c615416aa8dbb3e8e371eecca 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_referenda_referenda.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_referenda_referenda.rs @@ -16,27 +16,28 @@ //! Autogenerated weights for `pallet_referenda` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: `Some(Wasm)`, WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_referenda // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json -// --pallet=pallet_referenda -// --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -57,10 +58,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) fn submit() -> Weight { // Proof Size summary in bytes: - // Measured: `324` + // Measured: `185` // Estimated: `42428` - // Minimum execution time: 39_852_000 picoseconds. - Weight::from_parts(41_610_000, 0) + // Minimum execution time: 28_612_000 picoseconds. 
+ Weight::from_parts(30_060_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) @@ -69,15 +70,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `577` + // Measured: `438` // Estimated: `83866` - // Minimum execution time: 52_588_000 picoseconds. - Weight::from_parts(54_154_000, 0) + // Minimum execution time: 42_827_000 picoseconds. + Weight::from_parts(44_072_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) @@ -87,15 +90,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3334` + // Measured: `3225` // Estimated: `42428` - // Minimum execution time: 70_483_000 picoseconds. - Weight::from_parts(72_731_000, 0) + // Minimum execution time: 56_475_000 picoseconds. + Weight::from_parts(58_888_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) @@ -105,60 +110,62 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3354` + // Measured: `3245` // Estimated: `42428` - // Minimum execution time: 68_099_000 picoseconds. - Weight::from_parts(71_560_000, 0) + // Minimum execution time: 56_542_000 picoseconds. 
+ Weight::from_parts(58_616_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Referenda::DecidingCount` (r:1 w:1) /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `577` + // Measured: `438` // Estimated: `83866` - // Minimum execution time: 64_357_000 picoseconds. - Weight::from_parts(66_081_000, 0) + // Minimum execution time: 51_218_000 picoseconds. + Weight::from_parts(53_148_000, 0) .saturating_add(Weight::from_parts(0, 83866)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Referenda::DecidingCount` (r:1 w:1) /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `577` + // Measured: `438` // Estimated: `83866` - // Minimum execution time: 62_709_000 picoseconds. - Weight::from_parts(64_534_000, 0) + // Minimum execution time: 49_097_000 picoseconds. + Weight::from_parts(50_796_000, 0) .saturating_add(Weight::from_parts(0, 83866)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) fn refund_decision_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `417` + // Measured: `279` // Estimated: `4401` - // Minimum execution time: 31_296_000 picoseconds. - Weight::from_parts(32_221_000, 0) + // Minimum execution time: 23_720_000 picoseconds. 
+ Weight::from_parts(24_327_000, 0) .saturating_add(Weight::from_parts(0, 4401)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -167,10 +174,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) fn refund_submission_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `407` + // Measured: `269` // Estimated: `4401` - // Minimum execution time: 31_209_000 picoseconds. - Weight::from_parts(32_168_000, 0) + // Minimum execution time: 24_089_000 picoseconds. + Weight::from_parts(24_556_000, 0) .saturating_add(Weight::from_parts(0, 4401)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -179,15 +186,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn cancel() -> Weight { // Proof Size summary in bytes: - // Measured: `485` + // Measured: `346` // Estimated: `83866` - // Minimum execution time: 38_887_000 picoseconds. - Weight::from_parts(40_193_000, 0) + // Minimum execution time: 29_022_000 picoseconds. + Weight::from_parts(29_590_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) @@ -195,15 +204,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `Referenda::MetadataOf` (r:1 w:0) /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn kill() -> Weight { // Proof Size summary in bytes: - // Measured: `726` + // Measured: `587` // Estimated: `83866` - // Minimum execution time: 106_054_000 picoseconds. - Weight::from_parts(108_318_000, 0) + // Minimum execution time: 81_920_000 picoseconds. + Weight::from_parts(84_492_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Referenda::TrackQueue` (r:1 w:0) /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) @@ -211,10 +222,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) fn one_fewer_deciding_queue_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `240` + // Measured: `102` // Estimated: `5477` - // Minimum execution time: 9_263_000 picoseconds. 
- Weight::from_parts(9_763_000, 0) + // Minimum execution time: 8_134_000 picoseconds. + Weight::from_parts(8_574_000, 0) .saturating_add(Weight::from_parts(0, 5477)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -223,36 +234,32 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn one_fewer_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `3254` + // Measured: `3115` // Estimated: `42428` - // Minimum execution time: 50_080_000 picoseconds. - Weight::from_parts(51_858_000, 0) + // Minimum execution time: 39_932_000 picoseconds. + Weight::from_parts(42_086_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Referenda::TrackQueue` (r:1 w:1) /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn one_fewer_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `3254` + // Measured: `3115` // Estimated: `42428` - // Minimum execution time: 53_889_000 picoseconds. - Weight::from_parts(55_959_000, 0) + // Minimum execution time: 42_727_000 picoseconds. + Weight::from_parts(44_280_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) @@ -261,10 +268,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_requeued_insertion() -> Weight { // Proof Size summary in bytes: - // Measured: `3077` + // Measured: `2939` // Estimated: `5477` - // Minimum execution time: 23_266_000 picoseconds. - Weight::from_parts(24_624_000, 0) + // Minimum execution time: 20_918_000 picoseconds. 
+ Weight::from_parts(22_180_000, 0) .saturating_add(Weight::from_parts(0, 5477)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -275,10 +282,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_requeued_slide() -> Weight { // Proof Size summary in bytes: - // Measured: `3077` + // Measured: `2939` // Estimated: `5477` - // Minimum execution time: 22_846_000 picoseconds. - Weight::from_parts(24_793_000, 0) + // Minimum execution time: 20_943_000 picoseconds. + Weight::from_parts(21_932_000, 0) .saturating_add(Weight::from_parts(0, 5477)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -291,10 +298,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3081` + // Measured: `2943` // Estimated: `5477` - // Minimum execution time: 28_284_000 picoseconds. - Weight::from_parts(29_940_000, 0) + // Minimum execution time: 25_197_000 picoseconds. + Weight::from_parts(26_083_000, 0) .saturating_add(Weight::from_parts(0, 5477)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -307,10 +314,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3101` + // Measured: `2963` // Estimated: `5477` - // Minimum execution time: 28_133_000 picoseconds. - Weight::from_parts(29_638_000, 0) + // Minimum execution time: 24_969_000 picoseconds. + Weight::from_parts(26_096_000, 0) .saturating_add(Weight::from_parts(0, 5477)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -321,10 +328,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_no_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `437` + // Measured: `298` // Estimated: `42428` - // Minimum execution time: 25_710_000 picoseconds. - Weight::from_parts(26_500_000, 0) + // Minimum execution time: 18_050_000 picoseconds. + Weight::from_parts(18_790_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -335,10 +342,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `485` + // Measured: `346` // Estimated: `42428` - // Minimum execution time: 25_935_000 picoseconds. - Weight::from_parts(26_803_000, 0) + // Minimum execution time: 18_357_000 picoseconds. 
+ Weight::from_parts(18_957_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -347,10 +354,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) fn nudge_referendum_timed_out() -> Weight { // Proof Size summary in bytes: - // Measured: `344` + // Measured: `206` // Estimated: `4401` - // Minimum execution time: 17_390_000 picoseconds. - Weight::from_parts(18_042_000, 0) + // Minimum execution time: 11_479_000 picoseconds. + Weight::from_parts(11_968_000, 0) .saturating_add(Weight::from_parts(0, 4401)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -359,150 +366,136 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Referenda::DecidingCount` (r:1 w:1) /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_begin_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `485` + // Measured: `346` // Estimated: `42428` - // Minimum execution time: 35_141_000 picoseconds. - Weight::from_parts(36_318_000, 0) + // Minimum execution time: 24_471_000 picoseconds. + Weight::from_parts(25_440_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Referenda::DecidingCount` (r:1 w:1) /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_begin_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `485` + // Measured: `346` // Estimated: `42428` - // Minimum execution time: 37_815_000 picoseconds. - Weight::from_parts(39_243_000, 0) + // Minimum execution time: 26_580_000 picoseconds. 
+ Weight::from_parts(27_570_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_begin_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `538` + // Measured: `399` // Estimated: `42428` - // Minimum execution time: 30_779_000 picoseconds. - Weight::from_parts(31_845_000, 0) + // Minimum execution time: 24_331_000 picoseconds. + Weight::from_parts(25_291_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_end_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `521` + // Measured: `382` // Estimated: `42428` - // Minimum execution time: 31_908_000 picoseconds. - Weight::from_parts(33_253_000, 0) + // Minimum execution time: 24_768_000 picoseconds. + Weight::from_parts(25_746_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_continue_not_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `538` + // Measured: `399` // Estimated: `42428` - // Minimum execution time: 28_951_000 picoseconds. - Weight::from_parts(30_004_000, 0) + // Minimum execution time: 23_171_000 picoseconds. 
+ Weight::from_parts(24_161_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_continue_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `542` + // Measured: `403` // Estimated: `42428` - // Minimum execution time: 27_750_000 picoseconds. - Weight::from_parts(28_588_000, 0) + // Minimum execution time: 22_263_000 picoseconds. + Weight::from_parts(23_062_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Lookup` (r:1 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn nudge_referendum_approved() -> Weight { // Proof Size summary in bytes: - // Measured: `542` + // Measured: `403` // Estimated: `83866` - // Minimum execution time: 43_950_000 picoseconds. - Weight::from_parts(46_164_000, 0) + // Minimum execution time: 33_710_000 picoseconds. + Weight::from_parts(34_871_000, 0) .saturating_add(Weight::from_parts(0, 83866)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_rejected() -> Weight { // Proof Size summary in bytes: - // Measured: `538` + // Measured: `399` // Estimated: `42428` - // Minimum execution time: 31_050_000 picoseconds. - Weight::from_parts(32_169_000, 0) + // Minimum execution time: 24_260_000 picoseconds. 
+ Weight::from_parts(25_104_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:0) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Referenda::MetadataOf` (r:0 w:1) /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn set_some_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `560` + // Measured: `422` // Estimated: `4401` - // Minimum execution time: 21_193_000 picoseconds. - Weight::from_parts(22_116_000, 0) + // Minimum execution time: 19_821_000 picoseconds. + Weight::from_parts(20_641_000, 0) .saturating_add(Weight::from_parts(0, 4401)) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) @@ -511,10 +504,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `421` + // Measured: `283` // Estimated: `4401` - // Minimum execution time: 18_065_000 picoseconds. - Weight::from_parts(18_781_000, 0) + // Minimum execution time: 13_411_000 picoseconds. + Weight::from_parts(14_070_000, 0) .saturating_add(Weight::from_parts(0, 4401)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/rococo/src/weights/pallet_scheduler.rs b/polkadot/runtime/rococo/src/weights/pallet_scheduler.rs index 0f36dbd384df87d5a90c2a11392923e7ae8633aa..5f6b41d2b54ea95b398877b0ebcaccfbfce313ac 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_scheduler.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_scheduler.rs @@ -16,24 +16,26 @@ //! Autogenerated weights for `pallet_scheduler` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-grjcggob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_scheduler // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_scheduler -// --chain=rococo-dev // --header=./polkadot/file_header.txt // --output=./polkadot/runtime/rococo/src/weights/ @@ -54,8 +56,8 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `68` // Estimated: `1489` - // Minimum execution time: 2_869_000 picoseconds. - Weight::from_parts(3_109_000, 0) + // Minimum execution time: 3_114_000 picoseconds. + Weight::from_parts(3_245_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -67,11 +69,11 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `115 + s * (177 ±0)` // Estimated: `42428` - // Minimum execution time: 3_326_000 picoseconds. - Weight::from_parts(5_818_563, 0) + // Minimum execution time: 3_430_000 picoseconds. + Weight::from_parts(6_250_920, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 1_261 - .saturating_add(Weight::from_parts(336_446, 0).saturating_mul(s.into())) + // Standard Error: 1_350 + .saturating_add(Weight::from_parts(333_245, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -79,8 +81,8 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_007_000 picoseconds. - Weight::from_parts(3_197_000, 0) + // Minimum execution time: 3_166_000 picoseconds. + Weight::from_parts(3_295_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `Preimage::PreimageFor` (r:1 w:1) @@ -94,11 +96,11 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `251 + s * (1 ±0)` // Estimated: `3716 + s * (1 ±0)` - // Minimum execution time: 16_590_000 picoseconds. - Weight::from_parts(16_869_000, 0) + // Minimum execution time: 17_072_000 picoseconds. + Weight::from_parts(17_393_000, 0) .saturating_add(Weight::from_parts(0, 3716)) - // Standard Error: 9 - .saturating_add(Weight::from_parts(1_308, 0).saturating_mul(s.into())) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_204, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into())) @@ -109,8 +111,8 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_320_000 picoseconds. - Weight::from_parts(4_594_000, 0) + // Minimum execution time: 4_566_000 picoseconds. + Weight::from_parts(4_775_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -118,24 +120,24 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_956_000 picoseconds. 
- Weight::from_parts(3_216_000, 0) + // Minimum execution time: 3_180_000 picoseconds. + Weight::from_parts(3_339_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn execute_dispatch_signed() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_824_000 picoseconds. - Weight::from_parts(1_929_000, 0) + // Minimum execution time: 1_656_000 picoseconds. + Weight::from_parts(1_829_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn execute_dispatch_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_749_000 picoseconds. - Weight::from_parts(1_916_000, 0) + // Minimum execution time: 1_628_000 picoseconds. + Weight::from_parts(1_840_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `Scheduler::Agenda` (r:1 w:1) @@ -145,16 +147,18 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `115 + s * (177 ±0)` // Estimated: `42428` - // Minimum execution time: 9_086_000 picoseconds. - Weight::from_parts(11_733_696, 0) + // Minimum execution time: 9_523_000 picoseconds. + Weight::from_parts(12_482_434, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 1_362 - .saturating_add(Weight::from_parts(375_266, 0).saturating_mul(s.into())) + // Standard Error: 1_663 + .saturating_add(Weight::from_parts(370_122, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Lookup` (r:0 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 50]`. @@ -162,13 +166,13 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `115 + s * (177 ±0)` // Estimated: `42428` - // Minimum execution time: 12_716_000 picoseconds. - Weight::from_parts(12_529_180, 0) + // Minimum execution time: 14_649_000 picoseconds. + Weight::from_parts(14_705_132, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 867 - .saturating_add(Weight::from_parts(548_188, 0).saturating_mul(s.into())) + // Standard Error: 1_126 + .saturating_add(Weight::from_parts(547_438, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Scheduler::Lookup` (r:1 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) @@ -179,11 +183,11 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `292 + s * (185 ±0)` // Estimated: `42428` - // Minimum execution time: 12_053_000 picoseconds. - Weight::from_parts(15_358_056, 0) + // Minimum execution time: 12_335_000 picoseconds. 
+ Weight::from_parts(16_144_217, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 3_176 - .saturating_add(Weight::from_parts(421_589, 0).saturating_mul(s.into())) + // Standard Error: 3_533 + .saturating_add(Weight::from_parts(413_823, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -191,49 +195,48 @@ impl pallet_scheduler::WeightInfo for WeightInfo { /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 50]`. fn cancel_named(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `318 + s * (185 ±0)` // Estimated: `42428` - // Minimum execution time: 14_803_000 picoseconds. - Weight::from_parts(15_805_714, 0) + // Minimum execution time: 16_906_000 picoseconds. + Weight::from_parts(17_846_662, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 2_597 - .saturating_add(Weight::from_parts(611_053, 0).saturating_mul(s.into())) + // Standard Error: 2_687 + .saturating_add(Weight::from_parts(613_356, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `Scheduler::Retries` (r:1 w:2) - /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Lookup` (r:0 w:1) - /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 50]`. fn schedule_retry(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `196` + // Measured: `155` // Estimated: `42428` - // Minimum execution time: 13_156_000 picoseconds. - Weight::from_parts(13_801_287, 0) + // Minimum execution time: 8_988_000 picoseconds. + Weight::from_parts(9_527_838, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 568 - .saturating_add(Weight::from_parts(35_441, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(4)) + // Standard Error: 523 + .saturating_add(Weight::from_parts(25_453, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Scheduler::Agenda` (r:1 w:0) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Retries` (r:0 w:1) /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) - /// The range of component `s` is `[1, 50]`. 
fn set_retry() -> Weight { // Proof Size summary in bytes: - // Measured: `115 + s * (177 ±0)` + // Measured: `8965` // Estimated: `42428` - // Minimum execution time: 7_912_000 picoseconds. - Weight::from_parts(8_081_460, 0) + // Minimum execution time: 23_337_000 picoseconds. + Weight::from_parts(24_255_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -244,13 +247,12 @@ impl pallet_scheduler::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Retries` (r:0 w:1) /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) - /// The range of component `s` is `[1, 50]`. fn set_retry_named() -> Weight { // Proof Size summary in bytes: - // Measured: `324 + s * (185 ±0)` + // Measured: `9643` // Estimated: `42428` - // Minimum execution time: 10_673_000 picoseconds. - Weight::from_parts(12_212_185, 0) + // Minimum execution time: 30_704_000 picoseconds. + Weight::from_parts(31_646_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -259,13 +261,12 @@ impl pallet_scheduler::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Retries` (r:0 w:1) /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) - /// The range of component `s` is `[1, 50]`. fn cancel_retry() -> Weight { // Proof Size summary in bytes: - // Measured: `115 + s * (177 ±0)` + // Measured: `8977` // Estimated: `42428` - // Minimum execution time: 7_912_000 picoseconds. - Weight::from_parts(8_081_460, 0) + // Minimum execution time: 22_279_000 picoseconds. + Weight::from_parts(23_106_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -276,13 +277,12 @@ impl pallet_scheduler::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Retries` (r:0 w:1) /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) - /// The range of component `s` is `[1, 50]`. fn cancel_retry_named() -> Weight { // Proof Size summary in bytes: - // Measured: `324 + s * (185 ±0)` + // Measured: `9655` // Estimated: `42428` - // Minimum execution time: 10_673_000 picoseconds. - Weight::from_parts(12_212_185, 0) + // Minimum execution time: 29_649_000 picoseconds. + Weight::from_parts(30_472_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/rococo/src/weights/pallet_sudo.rs b/polkadot/runtime/rococo/src/weights/pallet_sudo.rs index 694174954fc78b1eeb4d93f78e6b26d5bab22b83..ecc31dc3fa9df6c17a5541fcd5a6d957f6ca2e2d 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_sudo.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_sudo.rs @@ -16,24 +16,26 @@ //! Autogenerated weights for `pallet_sudo` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_sudo // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_sudo -// --chain=rococo-dev // --header=./polkadot/file_header.txt // --output=./polkadot/runtime/rococo/src/weights/ @@ -54,8 +56,8 @@ impl pallet_sudo::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `132` // Estimated: `1517` - // Minimum execution time: 8_432_000 picoseconds. - Weight::from_parts(8_757_000, 0) + // Minimum execution time: 8_336_000 picoseconds. + Weight::from_parts(8_569_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -66,8 +68,8 @@ impl pallet_sudo::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `132` // Estimated: `1517` - // Minimum execution time: 9_167_000 picoseconds. - Weight::from_parts(9_397_000, 0) + // Minimum execution time: 8_858_000 picoseconds. + Weight::from_parts(9_238_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -77,8 +79,8 @@ impl pallet_sudo::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `132` // Estimated: `1517` - // Minimum execution time: 9_133_000 picoseconds. - Weight::from_parts(9_573_000, 0) + // Minimum execution time: 8_921_000 picoseconds. + Weight::from_parts(9_324_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -88,10 +90,21 @@ impl pallet_sudo::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `132` // Estimated: `1517` - // Minimum execution time: 7_374_000 picoseconds. - Weight::from_parts(7_702_000, 0) + // Minimum execution time: 7_398_000 picoseconds. + Weight::from_parts(7_869_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn check_only_sudo_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `132` + // Estimated: `1517` + // Minimum execution time: 3_146_000 picoseconds. 
+ Weight::from_parts(3_314_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_timestamp.rs b/polkadot/runtime/rococo/src/weights/pallet_timestamp.rs index 1bb2e227ab7d540ba8cfd9e4609e957bf025d57c..7d79621b9e65f959221fe382254db93db6237ff7 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_timestamp.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_timestamp.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_timestamp` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_timestamp // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,26 +50,26 @@ use core::marker::PhantomData; /// Weight functions for `pallet_timestamp`. pub struct WeightInfo(PhantomData); impl pallet_timestamp::WeightInfo for WeightInfo { - /// Storage: Timestamp Now (r:1 w:1) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Babe CurrentSlot (r:1 w:0) - /// Proof: Babe CurrentSlot (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: `Timestamp::Now` (r:1 w:1) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Babe::CurrentSlot` (r:1 w:0) + /// Proof: `Babe::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn set() -> Weight { // Proof Size summary in bytes: - // Measured: `311` + // Measured: `137` // Estimated: `1493` - // Minimum execution time: 10_103_000 picoseconds. - Weight::from_parts(10_597_000, 0) + // Minimum execution time: 5_596_000 picoseconds. + Weight::from_parts(5_823_000, 0) .saturating_add(Weight::from_parts(0, 1493)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } fn on_finalize() -> Weight { // Proof Size summary in bytes: - // Measured: `94` + // Measured: `57` // Estimated: `0` - // Minimum execution time: 4_718_000 picoseconds. - Weight::from_parts(4_839_000, 0) + // Minimum execution time: 2_777_000 picoseconds. 
+ Weight::from_parts(2_900_000, 0) .saturating_add(Weight::from_parts(0, 0)) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_transaction_payment.rs b/polkadot/runtime/rococo/src/weights/pallet_transaction_payment.rs new file mode 100644 index 0000000000000000000000000000000000000000..44dfab289fb2dd22fd6bc81cfaae81769d14313f --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,68 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=rococo-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_transaction_payment +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `252` + // Estimated: `1737` + // Minimum execution time: 33_070_000 picoseconds. + Weight::from_parts(33_730_000, 0) + .saturating_add(Weight::from_parts(0, 1737)) + .saturating_add(T::DbWeight::get().reads(3)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/pallet_treasury.rs b/polkadot/runtime/rococo/src/weights/pallet_treasury.rs index 144e9d5b872382b7381e2d77c6fb08a8fbece4fa..acf09989afca1fe41fb5648d3b500116b1acdeed 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_treasury.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_treasury.rs @@ -16,25 +16,28 @@ //! 
Autogenerated weights for `pallet_treasury` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-07, STEPS: `50`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `cob`, CPU: `` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/debug/polkadot +// ./target/production/polkadot // benchmark // pallet // --chain=rococo-dev // --steps=50 -// --repeat=2 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_treasury // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./runtime/rococo/src/weights/ -// --header=./file_header.txt +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,176 +50,168 @@ use core::marker::PhantomData; /// Weight functions for `pallet_treasury`. pub struct WeightInfo(PhantomData); impl pallet_treasury::WeightInfo for WeightInfo { - /// Storage: Treasury ProposalCount (r:1 w:1) - /// Proof: Treasury ProposalCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:0 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) + /// Storage: `Treasury::ProposalCount` (r:1 w:1) + /// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:0 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) fn spend_local() -> Weight { // Proof Size summary in bytes: - // Measured: `42` + // Measured: `142` // Estimated: `1887` - // Minimum execution time: 177_000_000 picoseconds. - Weight::from_parts(191_000_000, 0) + // Minimum execution time: 9_928_000 picoseconds. 
+ Weight::from_parts(10_560_000, 0) .saturating_add(Weight::from_parts(0, 1887)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Treasury ProposalCount (r:1 w:1) - /// Proof: Treasury ProposalCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:0 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) + /// Storage: `Treasury::ProposalCount` (r:1 w:1) + /// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:0 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) fn propose_spend() -> Weight { // Proof Size summary in bytes: - // Measured: `143` + // Measured: `243` // Estimated: `1489` - // Minimum execution time: 354_000_000 picoseconds. - Weight::from_parts(376_000_000, 0) + // Minimum execution time: 20_714_000 picoseconds. + Weight::from_parts(21_137_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Treasury Proposals (r:1 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Treasury::Proposals` (r:1 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn reject_proposal() -> Weight { // Proof Size summary in bytes: - // Measured: `301` + // Measured: `401` // Estimated: `3593` - // Minimum execution time: 547_000_000 picoseconds. - Weight::from_parts(550_000_000, 0) + // Minimum execution time: 31_665_000 picoseconds. + Weight::from_parts(32_442_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Treasury Proposals (r:1 w:0) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Treasury::Proposals` (r:1 w:0) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 99]`. fn approve_proposal(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `470 + p * (8 ±0)` + // Measured: `570 + p * (8 ±0)` // Estimated: `3573` - // Minimum execution time: 104_000_000 picoseconds. - Weight::from_parts(121_184_402, 0) + // Minimum execution time: 6_988_000 picoseconds. 
+ Weight::from_parts(11_464_972, 0) .saturating_add(Weight::from_parts(0, 3573)) - // Standard Error: 42_854 - .saturating_add(Weight::from_parts(153_112, 0).saturating_mul(p.into())) + // Standard Error: 1_722 + .saturating_add(Weight::from_parts(84_536, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn remove_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `127` + // Measured: `227` // Estimated: `1887` - // Minimum execution time: 80_000_000 picoseconds. - Weight::from_parts(82_000_000, 0) + // Minimum execution time: 5_386_000 picoseconds. + Weight::from_parts(5_585_000, 0) .saturating_add(Weight::from_parts(0, 1887)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Treasury Deactivated (r:1 w:1) - /// Proof: Treasury Deactivated (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:1) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:99 w:99) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: System Account (r:199 w:199) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyApprovals (r:1 w:1) - /// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Treasury::Deactivated` (r:1 w:1) + /// Proof: `Treasury::Deactivated` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:99 w:99) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:199 w:199) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 99]`. fn on_initialize_proposals(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `331 + p * (251 ±0)` + // Measured: `431 + p * (251 ±0)` // Estimated: `3593 + p * (5206 ±0)` - // Minimum execution time: 887_000_000 picoseconds. - Weight::from_parts(828_616_021, 0) + // Minimum execution time: 43_737_000 picoseconds. 
+ Weight::from_parts(39_883_021, 0) .saturating_add(Weight::from_parts(0, 3593)) - // Standard Error: 695_351 - .saturating_add(Weight::from_parts(566_114_524, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(5)) + // Standard Error: 12_917 + .saturating_add(Weight::from_parts(31_796_205, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(p.into()))) - .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().writes(4)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 5206).saturating_mul(p.into())) } - /// Storage: AssetRate ConversionRateToNative (r:1 w:0) - /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen) - /// Storage: Treasury SpendCount (r:1 w:1) - /// Proof: Treasury SpendCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Spends (r:0 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen) + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:0) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(1238), added: 3713, mode: `MaxEncodedLen`) + /// Storage: `Treasury::SpendCount` (r:1 w:1) + /// Proof: `Treasury::SpendCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Spends` (r:0 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(1853), added: 4328, mode: `MaxEncodedLen`) fn spend() -> Weight { // Proof Size summary in bytes: - // Measured: `114` - // Estimated: `4702` - // Minimum execution time: 208_000_000 picoseconds. - Weight::from_parts(222_000_000, 0) - .saturating_add(Weight::from_parts(0, 4702)) + // Measured: `215` + // Estimated: `4703` + // Minimum execution time: 16_829_000 picoseconds. 
+ Weight::from_parts(17_251_000, 0) + .saturating_add(Weight::from_parts(0, 4703)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Treasury Spends (r:1 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen) - /// Storage: XcmPallet QueryCounter (r:1 w:1) - /// Proof Skipped: XcmPallet QueryCounter (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmPallet SafeXcmVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet Queries (r:0 w:1) - /// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(1853), added: 4328, mode: `MaxEncodedLen`) + /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) + /// Proof: `XcmPallet::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::Queries` (r:0 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn payout() -> Weight { // Proof Size summary in bytes: - // Measured: `737` - // Estimated: `5313` - // Minimum execution time: 551_000_000 picoseconds. - Weight::from_parts(569_000_000, 0) - .saturating_add(Weight::from_parts(0, 5313)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(6)) + // Measured: `458` + // Estimated: `5318` + // Minimum execution time: 41_554_000 picoseconds. 
+ Weight::from_parts(42_451_000, 0) + .saturating_add(Weight::from_parts(0, 5318)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: Treasury Spends (r:1 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen) - /// Storage: XcmPallet Queries (r:1 w:1) - /// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(1853), added: 4328, mode: `MaxEncodedLen`) + /// Storage: `XcmPallet::Queries` (r:1 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn check_status() -> Weight { // Proof Size summary in bytes: - // Measured: `442` - // Estimated: `5313` - // Minimum execution time: 245_000_000 picoseconds. - Weight::from_parts(281_000_000, 0) - .saturating_add(Weight::from_parts(0, 5313)) + // Measured: `306` + // Estimated: `5318` + // Minimum execution time: 22_546_000 picoseconds. + Weight::from_parts(23_151_000, 0) + .saturating_add(Weight::from_parts(0, 5318)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Treasury Spends (r:1 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen) + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(1853), added: 4328, mode: `MaxEncodedLen`) fn void_spend() -> Weight { // Proof Size summary in bytes: - // Measured: `172` - // Estimated: `5313` - // Minimum execution time: 147_000_000 picoseconds. - Weight::from_parts(160_000_000, 0) - .saturating_add(Weight::from_parts(0, 5313)) + // Measured: `278` + // Estimated: `5318` + // Minimum execution time: 12_169_000 picoseconds. + Weight::from_parts(12_484_000, 0) + .saturating_add(Weight::from_parts(0, 5318)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/pallet_utility.rs b/polkadot/runtime/rococo/src/weights/pallet_utility.rs index f50f60eaad7fec2c09e9d5620db0551f1c7b1364..6f2a374247f8f28d1deb281b8256393a74294fad 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_utility.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_utility.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_utility` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_utility // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -52,18 +55,18 @@ impl pallet_utility::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_738_000 picoseconds. - Weight::from_parts(2_704_821, 0) + // Minimum execution time: 4_041_000 picoseconds. + Weight::from_parts(5_685_496, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_999 - .saturating_add(Weight::from_parts(4_627_278, 0).saturating_mul(c.into())) + // Standard Error: 810 + .saturating_add(Weight::from_parts(3_177_197, 0).saturating_mul(c.into())) } fn as_derivative() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_294_000 picoseconds. - Weight::from_parts(5_467_000, 0) + // Minimum execution time: 3_667_000 picoseconds. + Weight::from_parts(3_871_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// The range of component `c` is `[0, 1000]`. @@ -71,18 +74,18 @@ impl pallet_utility::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_828_000 picoseconds. - Weight::from_parts(4_650_678, 0) + // Minimum execution time: 4_116_000 picoseconds. + Weight::from_parts(6_453_932, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_789 - .saturating_add(Weight::from_parts(4_885_004, 0).saturating_mul(c.into())) + // Standard Error: 825 + .saturating_add(Weight::from_parts(3_366_112, 0).saturating_mul(c.into())) } fn dispatch_as() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_020_000 picoseconds. - Weight::from_parts(9_205_000, 0) + // Minimum execution time: 5_630_000 picoseconds. + Weight::from_parts(5_956_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// The range of component `c` is `[0, 1000]`. @@ -90,10 +93,10 @@ impl pallet_utility::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_852_000 picoseconds. - Weight::from_parts(20_703_134, 0) + // Minimum execution time: 4_165_000 picoseconds. + Weight::from_parts(5_442_561, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 3_924 - .saturating_add(Weight::from_parts(4_604_529, 0).saturating_mul(c.into())) + // Standard Error: 460 + .saturating_add(Weight::from_parts(3_173_577, 0).saturating_mul(c.into())) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_vesting.rs b/polkadot/runtime/rococo/src/weights/pallet_vesting.rs index 2596207d5837df6776177b71480c57af61ac02ce..c21ab0877742019b238adf33bec4a378d4cb82ea 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_vesting.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_vesting.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_vesting` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_vesting // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,143 +50,143 @@ use core::marker::PhantomData; /// Weight functions for `pallet_vesting`. pub struct WeightInfo(PhantomData); impl pallet_vesting::WeightInfo for WeightInfo { - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_locked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `277 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 32_820_000 picoseconds. - Weight::from_parts(31_640_992, 0) + // Minimum execution time: 29_288_000 picoseconds. 
+ Weight::from_parts(29_095_507, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 449 - .saturating_add(Weight::from_parts(45_254, 0).saturating_mul(l.into())) - // Standard Error: 800 - .saturating_add(Weight::from_parts(72_178, 0).saturating_mul(s.into())) + // Standard Error: 1_679 + .saturating_add(Weight::from_parts(33_164, 0).saturating_mul(l.into())) + // Standard Error: 2_988 + .saturating_add(Weight::from_parts(67_092, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_unlocked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `277 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 36_054_000 picoseconds. - Weight::from_parts(35_825_428, 0) + // Minimum execution time: 31_003_000 picoseconds. 
+ Weight::from_parts(30_528_438, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 749 - .saturating_add(Weight::from_parts(31_738, 0).saturating_mul(l.into())) - // Standard Error: 1_333 - .saturating_add(Weight::from_parts(40_580, 0).saturating_mul(s.into())) + // Standard Error: 1_586 + .saturating_add(Weight::from_parts(35_429, 0).saturating_mul(l.into())) + // Standard Error: 2_823 + .saturating_add(Weight::from_parts(76_505, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_other_locked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `380 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 35_440_000 picoseconds. - Weight::from_parts(34_652_647, 0) + // Minimum execution time: 31_269_000 picoseconds. 
+ Weight::from_parts(30_661_898, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 517 - .saturating_add(Weight::from_parts(41_942, 0).saturating_mul(l.into())) - // Standard Error: 920 - .saturating_add(Weight::from_parts(66_074, 0).saturating_mul(s.into())) + // Standard Error: 1_394 + .saturating_add(Weight::from_parts(39_300, 0).saturating_mul(l.into())) + // Standard Error: 2_480 + .saturating_add(Weight::from_parts(78_849, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `380 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 38_880_000 picoseconds. - Weight::from_parts(39_625_819, 0) + // Minimum execution time: 33_040_000 picoseconds. 
+ Weight::from_parts(32_469_674, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 1_032 - .saturating_add(Weight::from_parts(29_856, 0).saturating_mul(l.into())) - // Standard Error: 1_837 - .saturating_add(Weight::from_parts(6_210, 0).saturating_mul(s.into())) + // Standard Error: 1_418 + .saturating_add(Weight::from_parts(44_206, 0).saturating_mul(l.into())) + // Standard Error: 2_523 + .saturating_add(Weight::from_parts(74_224, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[0, 27]`. fn vested_transfer(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `451 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 68_294_000 picoseconds. - Weight::from_parts(68_313_394, 0) + // Minimum execution time: 62_032_000 picoseconds. 
+ Weight::from_parts(63_305_621, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 983 - .saturating_add(Weight::from_parts(48_156, 0).saturating_mul(l.into())) - // Standard Error: 1_750 - .saturating_add(Weight::from_parts(87_719, 0).saturating_mul(s.into())) + // Standard Error: 2_277 + .saturating_add(Weight::from_parts(42_767, 0).saturating_mul(l.into())) + // Standard Error: 4_051 + .saturating_add(Weight::from_parts(65_487, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[0, 27]`. fn force_vested_transfer(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `554 + l * (25 ±0) + s * (36 ±0)` // Estimated: `6196` - // Minimum execution time: 70_529_000 picoseconds. - Weight::from_parts(70_619_962, 0) + // Minimum execution time: 63_303_000 picoseconds. + Weight::from_parts(65_180_847, 0) .saturating_add(Weight::from_parts(0, 6196)) - // Standard Error: 1_259 - .saturating_add(Weight::from_parts(50_685, 0).saturating_mul(l.into())) - // Standard Error: 2_241 - .saturating_add(Weight::from_parts(91_444, 0).saturating_mul(s.into())) + // Standard Error: 2_220 + .saturating_add(Weight::from_parts(28_829, 0).saturating_mul(l.into())) + // Standard Error: 3_951 + .saturating_add(Weight::from_parts(84_970, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -192,59 +195,70 @@ impl pallet_vesting::WeightInfo for WeightInfo { /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[2, 28]`. 
- fn force_remove_vesting_schedule(l: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `555 + l * (25 ±0) + s * (36 ±0)` - // Estimated: `4764` - // Minimum execution time: 41_497_000 picoseconds. - Weight::from_parts(38_763_834, 4764) - // Standard Error: 2_030 - .saturating_add(Weight::from_parts(99_580, 0).saturating_mul(l.into())) - // Standard Error: 3_750 - .saturating_add(Weight::from_parts(132_188, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - } fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `378 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 36_428_000 picoseconds. - Weight::from_parts(35_604_430, 0) + // Minimum execution time: 31_440_000 picoseconds. + Weight::from_parts(30_773_053, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 504 - .saturating_add(Weight::from_parts(43_191, 0).saturating_mul(l.into())) - // Standard Error: 931 - .saturating_add(Weight::from_parts(66_795, 0).saturating_mul(s.into())) + // Standard Error: 1_474 + .saturating_add(Weight::from_parts(43_019, 0).saturating_mul(l.into())) + // Standard Error: 2_723 + .saturating_add(Weight::from_parts(73_360, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[2, 28]`. fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `378 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 40_696_000 picoseconds. - Weight::from_parts(39_741_284, 0) + // Minimum execution time: 34_221_000 picoseconds. 
+ Weight::from_parts(33_201_125, 0) + .saturating_add(Weight::from_parts(0, 4764)) + // Standard Error: 1_751 + .saturating_add(Weight::from_parts(44_088, 0).saturating_mul(l.into())) + // Standard Error: 3_234 + .saturating_add(Weight::from_parts(86_228, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[2, 28]`. + fn force_remove_vesting_schedule(l: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `451 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 35_553_000 picoseconds. + Weight::from_parts(34_974_083, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 478 - .saturating_add(Weight::from_parts(43_792, 0).saturating_mul(l.into())) - // Standard Error: 883 - .saturating_add(Weight::from_parts(66_540, 0).saturating_mul(s.into())) + // Standard Error: 1_560 + .saturating_add(Weight::from_parts(34_615, 0).saturating_mul(l.into())) + // Standard Error: 2_882 + .saturating_add(Weight::from_parts(83_419, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } diff --git a/polkadot/runtime/rococo/src/weights/pallet_whitelist.rs b/polkadot/runtime/rococo/src/weights/pallet_whitelist.rs index 7c307deec4c6f8480019b5559117cfc47ee84b40..ec67268d1449952be992bd321154dd676c2a8a4d 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_whitelist.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_whitelist.rs @@ -16,26 +16,28 @@ //! Autogenerated weights for `pallet_whitelist` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-aahe6cbd-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_whitelist // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json -// --pallet=pallet_whitelist -// --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,67 +52,75 @@ pub struct WeightInfo(PhantomData); impl pallet_whitelist::WeightInfo for WeightInfo { /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) - /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn whitelist_call() -> Weight { // Proof Size summary in bytes: // Measured: `223` // Estimated: `3556` - // Minimum execution time: 20_035_000 picoseconds. - Weight::from_parts(20_452_000, 0) + // Minimum execution time: 16_686_000 picoseconds. + Weight::from_parts(17_042_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) - /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn remove_whitelisted_call() -> Weight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `3556` - // Minimum execution time: 20_247_000 picoseconds. - Weight::from_parts(20_808_000, 0) + // Minimum execution time: 18_250_000 picoseconds. 
+ Weight::from_parts(19_026_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:1 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) - /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 4194294]`. fn dispatch_whitelisted_call(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `428 + n * (1 ±0)` // Estimated: `3892 + n * (1 ±0)` - // Minimum execution time: 32_633_000 picoseconds. - Weight::from_parts(32_855_000, 0) + // Minimum execution time: 28_741_000 picoseconds. + Weight::from_parts(29_024_000, 0) .saturating_add(Weight::from_parts(0, 3892)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_223, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(3)) + // Standard Error: 7 + .saturating_add(Weight::from_parts(1_305, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) - /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 10000]`. fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `3556` - // Minimum execution time: 23_833_000 picoseconds. - Weight::from_parts(24_698_994, 0) + // Minimum execution time: 21_670_000 picoseconds. + Weight::from_parts(22_561_364, 0) .saturating_add(Weight::from_parts(0, 3556)) // Standard Error: 4 - .saturating_add(Weight::from_parts(1_454, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(Weight::from_parts(1_468, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs index 177407ef7088b5c9bdcc460f36cb7d6485743a71..d5cf33515e6be23e7cde0d892b97c8c1d07d8c2f 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs @@ -16,24 +16,26 @@ //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_xcm // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm -// --chain=rococo-dev // --header=./polkadot/file_header.txt // --output=./polkadot/runtime/rococo/src/weights/ @@ -48,10 +50,6 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { - fn transfer_assets() -> Weight { - // TODO: run benchmarks - Weight::zero() - } /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) @@ -62,36 +60,80 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 27_328_000 picoseconds. - Weight::from_parts(27_976_000, 0) - .saturating_add(Weight::from_parts(0, 3607)) + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 25_521_000 picoseconds. + Weight::from_parts(25_922_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 16_280_000 picoseconds. - Weight::from_parts(16_904_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 112_185_000 picoseconds. 
+ Weight::from_parts(115_991_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 15_869_000 picoseconds. - Weight::from_parts(16_264_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `232` + // Estimated: `3697` + // Minimum execution time: 108_693_000 picoseconds. + Weight::from_parts(111_853_000, 0) + .saturating_add(Weight::from_parts(0, 3697)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 113_040_000 picoseconds. + Weight::from_parts(115_635_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) } fn execute() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_923_000 picoseconds. - Weight::from_parts(7_432_000, 0) + // Minimum execution time: 6_979_000 picoseconds. + Weight::from_parts(7_342_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::SupportedVersion` (r:0 w:1) @@ -100,8 +142,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_333_000 picoseconds. - Weight::from_parts(7_566_000, 0) + // Minimum execution time: 7_144_000 picoseconds. + Weight::from_parts(7_297_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -109,8 +151,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_219_000 picoseconds. 
- Weight::from_parts(2_375_000, 0) + // Minimum execution time: 1_886_000 picoseconds. + Weight::from_parts(1_995_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) @@ -129,11 +171,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 30_650_000 picoseconds. - Weight::from_parts(31_683_000, 0) - .saturating_add(Weight::from_parts(0, 3607)) + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 31_238_000 picoseconds. + Weight::from_parts(31_955_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) } @@ -151,11 +193,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `322` - // Estimated: `3787` - // Minimum execution time: 37_666_000 picoseconds. - Weight::from_parts(38_920_000, 0) - .saturating_add(Weight::from_parts(0, 3787)) + // Measured: `360` + // Estimated: `3825` + // Minimum execution time: 37_237_000 picoseconds. + Weight::from_parts(38_569_000, 0) + .saturating_add(Weight::from_parts(0, 3825)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -165,45 +207,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_244_000 picoseconds. - Weight::from_parts(2_425_000, 0) + // Minimum execution time: 1_884_000 picoseconds. + Weight::from_parts(2_028_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `XcmPallet::SupportedVersion` (r:4 w:2) + /// Storage: `XcmPallet::SupportedVersion` (r:5 w:2) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `26` - // Estimated: `10916` - // Minimum execution time: 14_710_000 picoseconds. - Weight::from_parts(15_156_000, 0) - .saturating_add(Weight::from_parts(0, 10916)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `22` + // Estimated: `13387` + // Minimum execution time: 16_048_000 picoseconds. + Weight::from_parts(16_617_000, 0) + .saturating_add(Weight::from_parts(0, 13387)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifiers` (r:4 w:2) + /// Storage: `XcmPallet::VersionNotifiers` (r:5 w:2) /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `30` - // Estimated: `10920` - // Minimum execution time: 14_630_000 picoseconds. - Weight::from_parts(15_290_000, 0) - .saturating_add(Weight::from_parts(0, 10920)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `26` + // Estimated: `13391` + // Minimum execution time: 16_073_000 picoseconds. 
+ Weight::from_parts(16_672_000, 0) + .saturating_add(Weight::from_parts(0, 13391)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:0) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:6 w:0) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `40` - // Estimated: `13405` - // Minimum execution time: 16_686_000 picoseconds. - Weight::from_parts(17_332_000, 0) - .saturating_add(Weight::from_parts(0, 13405)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15880` + // Minimum execution time: 18_422_000 picoseconds. + Weight::from_parts(18_900_000, 0) + .saturating_add(Weight::from_parts(0, 15880)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `XcmPallet::VersionNotifyTargets` (r:2 w:1) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -217,38 +259,38 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `178` - // Estimated: `6118` - // Minimum execution time: 30_180_000 picoseconds. - Weight::from_parts(31_351_000, 0) - .saturating_add(Weight::from_parts(0, 6118)) + // Measured: `216` + // Estimated: `6156` + // Minimum execution time: 30_373_000 picoseconds. + Weight::from_parts(30_972_000, 0) + .saturating_add(Weight::from_parts(0, 6156)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:3 w:0) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:0) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `69` - // Estimated: `8484` - // Minimum execution time: 9_624_000 picoseconds. - Weight::from_parts(10_029_000, 0) - .saturating_add(Weight::from_parts(0, 8484)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `10959` + // Minimum execution time: 11_863_000 picoseconds. + Weight::from_parts(12_270_000, 0) + .saturating_add(Weight::from_parts(0, 10959)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:2) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:2) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `37` - // Estimated: `10927` - // Minimum execution time: 15_139_000 picoseconds. - Weight::from_parts(15_575_000, 0) - .saturating_add(Weight::from_parts(0, 10927)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `33` + // Estimated: `13398` + // Minimum execution time: 16_733_000 picoseconds. 
+ Weight::from_parts(17_094_000, 0) + .saturating_add(Weight::from_parts(0, 13398)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:2) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:2) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -260,12 +302,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `182` - // Estimated: `11072` - // Minimum execution time: 37_871_000 picoseconds. - Weight::from_parts(38_940_000, 0) - .saturating_add(Weight::from_parts(0, 11072)) - .saturating_add(T::DbWeight::get().reads(8)) + // Measured: `216` + // Estimated: `13581` + // Minimum execution time: 39_236_000 picoseconds. + Weight::from_parts(40_587_000, 0) + .saturating_add(Weight::from_parts(0, 13581)) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) @@ -276,8 +318,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 2_732_000 picoseconds. - Weight::from_parts(2_892_000, 0) + // Minimum execution time: 2_145_000 picoseconds. + Weight::from_parts(2_255_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -288,10 +330,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7576` // Estimated: `11041` - // Minimum execution time: 23_813_000 picoseconds. - Weight::from_parts(24_201_000, 0) + // Minimum execution time: 22_518_000 picoseconds. + Weight::from_parts(22_926_000, 0) .saturating_add(Weight::from_parts(0, 11041)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `XcmPallet::AssetTraps` (r:1 w:1) + /// Proof: `XcmPallet::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `23` + // Estimated: `3488` + // Minimum execution time: 34_438_000 picoseconds. + Weight::from_parts(35_514_000, 0) + .saturating_add(Weight::from_parts(0, 3488)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_fungible.rs b/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_fungible.rs new file mode 100644 index 0000000000000000000000000000000000000000..dc5e5d8ca4b1ca7ba0cceb6fdd8ddedc0d7e3c28 --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_fungible.rs @@ -0,0 +1,191 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `pallet_xcm_benchmarks::fungible` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=rococo-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_xcm_benchmarks::fungible +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_fungible.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_xcm_benchmarks::fungible`. +pub struct WeightInfo(PhantomData); +impl pallet_xcm_benchmarks::fungible::WeightInfo for WeightInfo { + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn withdraw_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `3593` + // Minimum execution time: 27_223_000 picoseconds. + Weight::from_parts(27_947_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `6196` + // Minimum execution time: 36_502_000 picoseconds. 
+ Weight::from_parts(37_023_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn transfer_reserve_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `281` + // Estimated: `6196` + // Minimum execution time: 85_152_000 picoseconds. + Weight::from_parts(86_442_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn reserve_asset_deposited() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn initiate_reserve_withdraw() -> Weight { + // Proof Size summary in bytes: + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 56_571_000 picoseconds. + Weight::from_parts(58_163_000, 0) + .saturating_add(Weight::from_parts(0, 3746)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn receive_teleported_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `3593` + // Minimum execution time: 27_411_000 picoseconds. 
+ Weight::from_parts(27_953_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn deposit_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 20_776_000 picoseconds. + Weight::from_parts(21_145_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn deposit_reserve_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 51_738_000 picoseconds. + Weight::from_parts(53_251_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn initiate_teleport() -> Weight { + // Proof Size summary in bytes: + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 39_333_000 picoseconds. + Weight::from_parts(40_515_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_generic.rs b/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_generic.rs new file mode 100644 index 0000000000000000000000000000000000000000..b62f36172baf545ac83e55137f11e6bd5e5ad74f --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_generic.rs @@ -0,0 +1,347 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `pallet_xcm_benchmarks::generic` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=rococo-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_xcm_benchmarks::generic +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_generic.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_xcm_benchmarks::generic`. +pub struct WeightInfo(PhantomData); +impl pallet_xcm_benchmarks::generic::WeightInfo for WeightInfo { + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn report_holding() -> Weight { + // Proof Size summary in bytes: + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 55_210_000 picoseconds. + Weight::from_parts(56_613_000, 0) + .saturating_add(Weight::from_parts(0, 3746)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + fn buy_execution() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_246_000 picoseconds. + Weight::from_parts(1_339_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `XcmPallet::Queries` (r:1 w:0) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn query_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3465` + // Minimum execution time: 5_377_000 picoseconds. 
+ Weight::from_parts(5_549_000, 0) + .saturating_add(Weight::from_parts(0, 3465)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn transact() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_008_000 picoseconds. + Weight::from_parts(7_361_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn refund_surplus() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_700_000 picoseconds. + Weight::from_parts(1_848_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn set_error_handler() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_198_000 picoseconds. + Weight::from_parts(1_265_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn set_appendix() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_197_000 picoseconds. + Weight::from_parts(1_267_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn clear_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_193_000 picoseconds. + Weight::from_parts(1_258_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn descend_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_268_000 picoseconds. + Weight::from_parts(1_342_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn clear_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_173_000 picoseconds. + Weight::from_parts(1_248_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn report_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 53_715_000 picoseconds. + Weight::from_parts(54_860_000, 0) + .saturating_add(Weight::from_parts(0, 3746)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `XcmPallet::AssetTraps` (r:1 w:1) + /// Proof: `XcmPallet::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `23` + // Estimated: `3488` + // Minimum execution time: 8_621_000 picoseconds. 
+ Weight::from_parts(8_903_000, 0) + .saturating_add(Weight::from_parts(0, 3488)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn trap() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_211_000 picoseconds. + Weight::from_parts(1_281_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `XcmPallet::VersionNotifyTargets` (r:1 w:1) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn subscribe_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 26_448_000 picoseconds. + Weight::from_parts(27_057_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `XcmPallet::VersionNotifyTargets` (r:0 w:1) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn unsubscribe_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_498_000 picoseconds. + Weight::from_parts(3_614_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn burn_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_575_000 picoseconds. + Weight::from_parts(1_698_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn expect_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_334_000 picoseconds. + Weight::from_parts(1_435_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn expect_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_244_000 picoseconds. + Weight::from_parts(1_337_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn expect_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_244_000 picoseconds. + Weight::from_parts(1_331_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn expect_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_407_000 picoseconds. 
+ Weight::from_parts(1_522_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn query_pallet() -> Weight { + // Proof Size summary in bytes: + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 62_963_000 picoseconds. + Weight::from_parts(64_556_000, 0) + .saturating_add(Weight::from_parts(0, 3746)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + fn expect_pallet() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_458_000 picoseconds. + Weight::from_parts(8_741_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn report_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 54_068_000 picoseconds. + Weight::from_parts(55_665_000, 0) + .saturating_add(Weight::from_parts(0, 3746)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + fn clear_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_290_000 picoseconds. + Weight::from_parts(1_348_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn set_topic() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_189_000 picoseconds. + Weight::from_parts(1_268_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn clear_topic() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_197_000 picoseconds. + Weight::from_parts(1_276_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn set_fees_mode() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_197_000 picoseconds. 
+ Weight::from_parts(1_253_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn unpaid_execution() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_250_000 picoseconds. + Weight::from_parts(1_354_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_assigned_slots.rs b/polkadot/runtime/rococo/src/weights/runtime_common_assigned_slots.rs index a6beeded428649fdda8530fda4c10492867f11a2..f41a5d4ebf8f080eb7a9a11016296e97e3db3a1a 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_common_assigned_slots.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_common_assigned_slots.rs @@ -16,26 +16,28 @@ //! Autogenerated weights for `runtime_common::assigned_slots` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=runtime_common::assigned_slots // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json -// --pallet=runtime_common::assigned_slots -// --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_assigned_slots.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -48,7 +50,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_common::assigned_slots`. pub struct WeightInfo(PhantomData); impl runtime_common::assigned_slots::WeightInfo for WeightInfo { - /// Storage: `Registrar::Paras` (r:1 w:1) + /// Storage: `Registrar::Paras` (r:1 w:0) /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Paras::ParaLifecycles` (r:1 w:1) /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -68,15 +70,15 @@ impl runtime_common::assigned_slots::WeightInfo for Wei /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) fn assign_perm_parachain_slot() -> Weight { // Proof Size summary in bytes: - // Measured: `673` - // Estimated: `4138` - // Minimum execution time: 84_646_000 picoseconds. - Weight::from_parts(91_791_000, 0) - .saturating_add(Weight::from_parts(0, 4138)) + // Measured: `730` + // Estimated: `4195` + // Minimum execution time: 71_337_000 picoseconds. 
+ Weight::from_parts(80_807_000, 0) + .saturating_add(Weight::from_parts(0, 4195)) .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: `Registrar::Paras` (r:1 w:1) + /// Storage: `Registrar::Paras` (r:1 w:0) /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Paras::ParaLifecycles` (r:1 w:1) /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -98,13 +100,13 @@ impl runtime_common::assigned_slots::WeightInfo for Wei /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) fn assign_temp_parachain_slot() -> Weight { // Proof Size summary in bytes: - // Measured: `673` - // Estimated: `4138` - // Minimum execution time: 68_091_000 picoseconds. - Weight::from_parts(77_310_000, 0) - .saturating_add(Weight::from_parts(0, 4138)) + // Measured: `730` + // Estimated: `4195` + // Minimum execution time: 60_188_000 picoseconds. + Weight::from_parts(63_932_000, 0) + .saturating_add(Weight::from_parts(0, 4195)) .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(7)) + .saturating_add(T::DbWeight::get().writes(6)) } /// Storage: `AssignedSlots::PermanentSlots` (r:1 w:0) /// Proof: `AssignedSlots::PermanentSlots` (`max_values`: None, `max_size`: Some(20), added: 2495, mode: `MaxEncodedLen`) @@ -118,11 +120,11 @@ impl runtime_common::assigned_slots::WeightInfo for Wei /// Proof: `AssignedSlots::TemporarySlotCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn unassign_parachain_slot() -> Weight { // Proof Size summary in bytes: - // Measured: `823` - // Estimated: `4288` - // Minimum execution time: 38_081_000 picoseconds. - Weight::from_parts(40_987_000, 0) - .saturating_add(Weight::from_parts(0, 4288)) + // Measured: `856` + // Estimated: `4321` + // Minimum execution time: 35_764_000 picoseconds. + Weight::from_parts(38_355_000, 0) + .saturating_add(Weight::from_parts(0, 4321)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -132,8 +134,8 @@ impl runtime_common::assigned_slots::WeightInfo for Wei // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_182_000 picoseconds. - Weight::from_parts(7_437_000, 0) + // Minimum execution time: 4_634_000 picoseconds. + Weight::from_parts(4_852_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -143,8 +145,8 @@ impl runtime_common::assigned_slots::WeightInfo for Wei // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_153_000 picoseconds. - Weight::from_parts(7_456_000, 0) + // Minimum execution time: 4_563_000 picoseconds. + Weight::from_parts(4_829_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_auctions.rs b/polkadot/runtime/rococo/src/weights/runtime_common_auctions.rs index 3cd7c7a47e90ee1bf6590d0421faf580c1c776f6..2b756802289488173ce7dbf4cc8e15e4a8ae0f9d 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_common_auctions.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_common_auctions.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_common::auctions` //! -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_common::auctions // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_common_auctions.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_auctions.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,92 +50,90 @@ use core::marker::PhantomData; /// Weight functions for `runtime_common::auctions`. pub struct WeightInfo(PhantomData); impl runtime_common::auctions::WeightInfo for WeightInfo { - /// Storage: Auctions AuctionInfo (r:1 w:1) - /// Proof: Auctions AuctionInfo (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Auctions AuctionCounter (r:1 w:1) - /// Proof: Auctions AuctionCounter (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Auctions::AuctionInfo` (r:1 w:1) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Auctions::AuctionCounter` (r:1 w:1) + /// Proof: `Auctions::AuctionCounter` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn new_auction() -> Weight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `1493` - // Minimum execution time: 12_805_000 picoseconds. - Weight::from_parts(13_153_000, 0) + // Minimum execution time: 7_307_000 picoseconds. 
+ Weight::from_parts(7_680_000, 0) .saturating_add(Weight::from_parts(0, 1493)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Paras ParaLifecycles (r:1 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Auctions AuctionCounter (r:1 w:0) - /// Proof: Auctions AuctionCounter (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Auctions AuctionInfo (r:1 w:0) - /// Proof: Auctions AuctionInfo (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Slots Leases (r:1 w:0) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: Auctions Winning (r:1 w:1) - /// Proof: Auctions Winning (max_values: None, max_size: Some(1920), added: 4395, mode: MaxEncodedLen) - /// Storage: Auctions ReservedAmounts (r:2 w:2) - /// Proof: Auctions ReservedAmounts (max_values: None, max_size: Some(60), added: 2535, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Auctions::AuctionCounter` (r:1 w:0) + /// Proof: `Auctions::AuctionCounter` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Auctions::AuctionInfo` (r:1 w:0) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Slots::Leases` (r:1 w:0) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Auctions::Winning` (r:1 w:1) + /// Proof: `Auctions::Winning` (`max_values`: None, `max_size`: Some(1920), added: 4395, mode: `MaxEncodedLen`) + /// Storage: `Auctions::ReservedAmounts` (r:2 w:2) + /// Proof: `Auctions::ReservedAmounts` (`max_values`: None, `max_size`: Some(60), added: 2535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn bid() -> Weight { // Proof Size summary in bytes: - // Measured: `728` + // Measured: `761` // Estimated: `6060` - // Minimum execution time: 77_380_000 picoseconds. - Weight::from_parts(80_503_000, 0) + // Minimum execution time: 75_448_000 picoseconds. 
+ Weight::from_parts(78_716_000, 0) .saturating_add(Weight::from_parts(0, 6060)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Auctions AuctionInfo (r:1 w:1) - /// Proof: Auctions AuctionInfo (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Babe NextRandomness (r:1 w:0) - /// Proof: Babe NextRandomness (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: Babe EpochStart (r:1 w:0) - /// Proof: Babe EpochStart (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Auctions AuctionCounter (r:1 w:0) - /// Proof: Auctions AuctionCounter (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Auctions Winning (r:3600 w:3600) - /// Proof: Auctions Winning (max_values: None, max_size: Some(1920), added: 4395, mode: MaxEncodedLen) - /// Storage: Auctions ReservedAmounts (r:37 w:36) - /// Proof: Auctions ReservedAmounts (max_values: None, max_size: Some(60), added: 2535, mode: MaxEncodedLen) - /// Storage: System Account (r:36 w:36) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Slots Leases (r:2 w:2) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:1) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) - /// Storage: Registrar Paras (r:1 w:1) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) + /// Storage: `Auctions::AuctionInfo` (r:1 w:1) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Babe::NextRandomness` (r:1 w:0) + /// Proof: `Babe::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Babe::EpochStart` (r:1 w:0) + /// Proof: `Babe::EpochStart` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Auctions::AuctionCounter` (r:1 w:0) + /// Proof: `Auctions::AuctionCounter` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Auctions::Winning` (r:3600 w:3600) + /// Proof: `Auctions::Winning` (`max_values`: None, `max_size`: Some(1920), added: 4395, mode: `MaxEncodedLen`) + /// Storage: `Auctions::ReservedAmounts` (r:37 w:36) + /// Proof: `Auctions::ReservedAmounts` (`max_values`: None, `max_size`: Some(60), added: 2535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:36 w:36) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Slots::Leases` (r:2 w:2) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: 
`Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) fn on_initialize() -> Weight { // Proof Size summary in bytes: - // Measured: `6947789` + // Measured: `6947017` // Estimated: `15822990` - // Minimum execution time: 6_311_055_000 picoseconds. - Weight::from_parts(6_409_142_000, 0) + // Minimum execution time: 7_120_207_000 picoseconds. + Weight::from_parts(7_273_496_000, 0) .saturating_add(Weight::from_parts(0, 15822990)) - .saturating_add(T::DbWeight::get().reads(3683)) - .saturating_add(T::DbWeight::get().writes(3678)) + .saturating_add(T::DbWeight::get().reads(3682)) + .saturating_add(T::DbWeight::get().writes(3677)) } - /// Storage: Auctions ReservedAmounts (r:37 w:36) - /// Proof: Auctions ReservedAmounts (max_values: None, max_size: Some(60), added: 2535, mode: MaxEncodedLen) - /// Storage: System Account (r:36 w:36) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Auctions Winning (r:3600 w:3600) - /// Proof: Auctions Winning (max_values: None, max_size: Some(1920), added: 4395, mode: MaxEncodedLen) - /// Storage: Auctions AuctionInfo (r:0 w:1) - /// Proof: Auctions AuctionInfo (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: `Auctions::ReservedAmounts` (r:37 w:36) + /// Proof: `Auctions::ReservedAmounts` (`max_values`: None, `max_size`: Some(60), added: 2535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:36 w:36) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Auctions::Winning` (r:3600 w:3600) + /// Proof: `Auctions::Winning` (`max_values`: None, `max_size`: Some(1920), added: 4395, mode: `MaxEncodedLen`) + /// Storage: `Auctions::AuctionInfo` (r:0 w:1) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn cancel_auction() -> Weight { // Proof Size summary in bytes: // Measured: `177732` // Estimated: `15822990` - // Minimum execution time: 4_849_561_000 picoseconds. - Weight::from_parts(4_955_226_000, 0) + // Minimum execution time: 5_536_281_000 picoseconds. + Weight::from_parts(5_675_163_000, 0) .saturating_add(Weight::from_parts(0, 15822990)) .saturating_add(T::DbWeight::get().reads(3673)) .saturating_add(T::DbWeight::get().writes(3673)) diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_claims.rs b/polkadot/runtime/rococo/src/weights/runtime_common_claims.rs index 52e0dd24afa0aa5e4cd75d53f53ca7d032ca9588..de2bb71933b7a4e94f9b0f31eb417f75228c6602 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_common_claims.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_common_claims.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_common::claims` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_common::claims // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_common_claims.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_claims.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,120 +50,133 @@ use core::marker::PhantomData; /// Weight functions for `runtime_common::claims`. pub struct WeightInfo(PhantomData); impl runtime_common::claims::WeightInfo for WeightInfo { - /// Storage: Claims Claims (r:1 w:1) - /// Proof Skipped: Claims Claims (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Signing (r:1 w:1) - /// Proof Skipped: Claims Signing (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Total (r:1 w:1) - /// Proof Skipped: Claims Total (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Claims Vesting (r:1 w:1) - /// Proof Skipped: Claims Vesting (max_values: None, max_size: None, mode: Measured) - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `Claims::Claims` (r:1 w:1) + /// Proof: `Claims::Claims` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Signing` (r:1 w:1) + /// Proof: `Claims::Signing` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Total` (r:1 w:1) + /// Proof: `Claims::Total` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Vesting` (r:1 w:1) + /// Proof: `Claims::Vesting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) fn claim() -> Weight { // Proof Size summary in bytes: // Measured: `558` // Estimated: `4764` - // Minimum execution time: 144_931_000 picoseconds. - Weight::from_parts(156_550_000, 0) + // Minimum execution time: 181_028_000 picoseconds. 
+ Weight::from_parts(194_590_000, 0) .saturating_add(Weight::from_parts(0, 4764)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Claims Total (r:1 w:1) - /// Proof Skipped: Claims Total (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Claims Vesting (r:0 w:1) - /// Proof Skipped: Claims Vesting (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Claims (r:0 w:1) - /// Proof Skipped: Claims Claims (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Signing (r:0 w:1) - /// Proof Skipped: Claims Signing (max_values: None, max_size: None, mode: Measured) + /// Storage: `Claims::Total` (r:1 w:1) + /// Proof: `Claims::Total` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Vesting` (r:0 w:1) + /// Proof: `Claims::Vesting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Claims` (r:0 w:1) + /// Proof: `Claims::Claims` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Signing` (r:0 w:1) + /// Proof: `Claims::Signing` (`max_values`: None, `max_size`: None, mode: `Measured`) fn mint_claim() -> Weight { // Proof Size summary in bytes: // Measured: `216` // Estimated: `1701` - // Minimum execution time: 11_300_000 picoseconds. - Weight::from_parts(11_642_000, 0) + // Minimum execution time: 11_224_000 picoseconds. + Weight::from_parts(13_342_000, 0) .saturating_add(Weight::from_parts(0, 1701)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Claims Claims (r:1 w:1) - /// Proof Skipped: Claims Claims (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Signing (r:1 w:1) - /// Proof Skipped: Claims Signing (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Total (r:1 w:1) - /// Proof Skipped: Claims Total (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Claims Vesting (r:1 w:1) - /// Proof Skipped: Claims Vesting (max_values: None, max_size: None, mode: Measured) - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `Claims::Claims` (r:1 w:1) + /// Proof: `Claims::Claims` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Signing` (r:1 w:1) + /// Proof: `Claims::Signing` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Total` (r:1 w:1) + /// Proof: `Claims::Total` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Vesting` (r:1 w:1) + /// Proof: `Claims::Vesting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: 
`Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) fn claim_attest() -> Weight { // Proof Size summary in bytes: // Measured: `558` // Estimated: `4764` - // Minimum execution time: 149_112_000 picoseconds. - Weight::from_parts(153_872_000, 0) + // Minimum execution time: 187_964_000 picoseconds. + Weight::from_parts(202_553_000, 0) .saturating_add(Weight::from_parts(0, 4764)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Claims Preclaims (r:1 w:1) - /// Proof Skipped: Claims Preclaims (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Signing (r:1 w:1) - /// Proof Skipped: Claims Signing (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Claims (r:1 w:1) - /// Proof Skipped: Claims Claims (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Total (r:1 w:1) - /// Proof Skipped: Claims Total (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Claims Vesting (r:1 w:1) - /// Proof Skipped: Claims Vesting (max_values: None, max_size: None, mode: Measured) - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `Claims::Preclaims` (r:1 w:1) + /// Proof: `Claims::Preclaims` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Signing` (r:1 w:1) + /// Proof: `Claims::Signing` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Claims` (r:1 w:1) + /// Proof: `Claims::Claims` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Total` (r:1 w:1) + /// Proof: `Claims::Total` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Vesting` (r:1 w:1) + /// Proof: `Claims::Vesting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) fn attest() -> Weight { // Proof Size summary in bytes: // Measured: `632` // Estimated: `4764` - // Minimum execution time: 69_619_000 picoseconds. - Weight::from_parts(79_242_000, 0) + // Minimum execution time: 78_210_000 picoseconds. 
+ Weight::from_parts(84_581_000, 0) .saturating_add(Weight::from_parts(0, 4764)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(7)) } - /// Storage: Claims Claims (r:1 w:2) - /// Proof Skipped: Claims Claims (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Vesting (r:1 w:2) - /// Proof Skipped: Claims Vesting (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Signing (r:1 w:2) - /// Proof Skipped: Claims Signing (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Preclaims (r:1 w:1) - /// Proof Skipped: Claims Preclaims (max_values: None, max_size: None, mode: Measured) + /// Storage: `Claims::Claims` (r:1 w:2) + /// Proof: `Claims::Claims` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Vesting` (r:1 w:2) + /// Proof: `Claims::Vesting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Signing` (r:1 w:2) + /// Proof: `Claims::Signing` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Preclaims` (r:1 w:1) + /// Proof: `Claims::Preclaims` (`max_values`: None, `max_size`: None, mode: `Measured`) fn move_claim() -> Weight { // Proof Size summary in bytes: // Measured: `440` // Estimated: `3905` - // Minimum execution time: 22_066_000 picoseconds. - Weight::from_parts(22_483_000, 0) + // Minimum execution time: 33_940_000 picoseconds. + Weight::from_parts(48_438_000, 0) .saturating_add(Weight::from_parts(0, 3905)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(7)) } + /// Storage: `Claims::Preclaims` (r:1 w:0) + /// Proof: `Claims::Preclaims` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Signing` (r:1 w:0) + /// Proof: `Claims::Signing` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn prevalidate_attests() -> Weight { + // Proof Size summary in bytes: + // Measured: `296` + // Estimated: `3761` + // Minimum execution time: 9_025_000 picoseconds. + Weight::from_parts(10_563_000, 0) + .saturating_add(Weight::from_parts(0, 3761)) + .saturating_add(T::DbWeight::get().reads(2)) + } } diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_coretime.rs b/polkadot/runtime/rococo/src/weights/runtime_common_coretime.rs new file mode 100644 index 0000000000000000000000000000000000000000..d068f07e7594a80a9b728e0776649e52f88a7bf5 --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/runtime_common_coretime.rs @@ -0,0 +1,86 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `runtime_common::coretime` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! 
HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=rococo-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=runtime_common::coretime +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_coretime.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `runtime_common::coretime`. +pub struct WeightInfo(PhantomData); +impl runtime_common::coretime::WeightInfo for WeightInfo { + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn request_core_count() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_543_000 picoseconds. + Weight::from_parts(7_745_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreSchedules` (r:0 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreSchedules` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `s` is `[1, 100]`. + fn assign_core(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 9_367_000 picoseconds. + Weight::from_parts(9_932_305, 0) + .saturating_add(Weight::from_parts(0, 3645)) + // Standard Error: 231 + .saturating_add(Weight::from_parts(12_947, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_crowdloan.rs b/polkadot/runtime/rococo/src/weights/runtime_common_crowdloan.rs index 0e7420cba2e6c981d4f9c0dd9e3c95c9ce26f1c5..8ebab3d551e69e54832f4da00c302a616cd96354 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_common_crowdloan.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_common_crowdloan.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_common::crowdloan` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_common::crowdloan // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_common_crowdloan.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_crowdloan.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,172 +50,168 @@ use core::marker::PhantomData; /// Weight functions for `runtime_common::crowdloan`. pub struct WeightInfo(PhantomData); impl runtime_common::crowdloan::WeightInfo for WeightInfo { - /// Storage: Crowdloan Funds (r:1 w:1) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: Registrar Paras (r:1 w:1) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Crowdloan NextFundIndex (r:1 w:1) - /// Proof Skipped: Crowdloan NextFundIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Crowdloan::Funds` (r:1 w:1) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::NextFundIndex` (r:1 w:1) + /// Proof: `Crowdloan::NextFundIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: // Measured: `438` // Estimated: `3903` - // Minimum execution time: 50_399_000 picoseconds. - Weight::from_parts(51_641_000, 0) + // Minimum execution time: 46_095_000 picoseconds. 
+ Weight::from_parts(48_111_000, 0) .saturating_add(Weight::from_parts(0, 3903)) .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Crowdloan Funds (r:1 w:1) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: Slots Leases (r:1 w:0) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: Auctions AuctionInfo (r:1 w:0) - /// Proof: Auctions AuctionInfo (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:1) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Crowdloan EndingsCount (r:1 w:0) - /// Proof Skipped: Crowdloan EndingsCount (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Crowdloan NewRaise (r:1 w:1) - /// Proof Skipped: Crowdloan NewRaise (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) - /// Proof Skipped: unknown `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) + /// Storage: `Crowdloan::Funds` (r:1 w:1) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Slots::Leases` (r:1 w:0) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Auctions::AuctionInfo` (r:1 w:0) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Crowdloan::EndingsCount` (r:1 w:0) + /// Proof: `Crowdloan::EndingsCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::NewRaise` (r:1 w:1) + /// Proof: `Crowdloan::NewRaise` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) fn contribute() -> Weight { // Proof Size summary in bytes: - // Measured: `530` - // Estimated: `3995` - // Minimum execution time: 128_898_000 picoseconds. - Weight::from_parts(130_277_000, 0) - .saturating_add(Weight::from_parts(0, 3995)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(5)) + // Measured: `563` + // Estimated: `4028` + // Minimum execution time: 133_059_000 picoseconds. 
+ Weight::from_parts(136_515_000, 0) + .saturating_add(Weight::from_parts(0, 4028)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Crowdloan Funds (r:1 w:1) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:1) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: unknown `0xc85982571aa615c788ef9b2c16f54f25773fd439e8ee1ed2aa3ae43d48e880f0` (r:1 w:1) - /// Proof Skipped: unknown `0xc85982571aa615c788ef9b2c16f54f25773fd439e8ee1ed2aa3ae43d48e880f0` (r:1 w:1) + /// Storage: `Crowdloan::Funds` (r:1 w:1) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xc85982571aa615c788ef9b2c16f54f25773fd439e8ee1ed2aa3ae43d48e880f0` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xc85982571aa615c788ef9b2c16f54f25773fd439e8ee1ed2aa3ae43d48e880f0` (r:1 w:1) fn withdraw() -> Weight { // Proof Size summary in bytes: - // Measured: `689` + // Measured: `687` // Estimated: `6196` - // Minimum execution time: 69_543_000 picoseconds. - Weight::from_parts(71_522_000, 0) + // Minimum execution time: 71_733_000 picoseconds. + Weight::from_parts(74_034_000, 0) .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `k` is `[0, 1000]`. fn refund(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `127 + k * (189 ±0)` - // Estimated: `140 + k * (189 ±0)` - // Minimum execution time: 50_735_000 picoseconds. - Weight::from_parts(52_282_000, 0) - .saturating_add(Weight::from_parts(0, 140)) - // Standard Error: 21_607 - .saturating_add(Weight::from_parts(38_955_985, 0).saturating_mul(k.into())) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `125 + k * (189 ±0)` + // Estimated: `138 + k * (189 ±0)` + // Minimum execution time: 46_016_000 picoseconds. 
+ Weight::from_parts(48_260_000, 0) + .saturating_add(Weight::from_parts(0, 138)) + // Standard Error: 21_140 + .saturating_add(Weight::from_parts(39_141_925, 0).saturating_mul(k.into())) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(k.into()))) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(2)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 189).saturating_mul(k.into())) } - /// Storage: Crowdloan Funds (r:1 w:1) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Crowdloan::Funds` (r:1 w:1) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn dissolve() -> Weight { // Proof Size summary in bytes: - // Measured: `515` + // Measured: `514` // Estimated: `6196` - // Minimum execution time: 43_100_000 picoseconds. - Weight::from_parts(44_272_000, 0) + // Minimum execution time: 44_724_000 picoseconds. + Weight::from_parts(47_931_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Crowdloan Funds (r:1 w:1) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) + /// Storage: `Crowdloan::Funds` (r:1 w:1) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) fn edit() -> Weight { // Proof Size summary in bytes: - // Measured: `235` - // Estimated: `3700` - // Minimum execution time: 18_702_000 picoseconds. - Weight::from_parts(19_408_000, 0) - .saturating_add(Weight::from_parts(0, 3700)) + // Measured: `234` + // Estimated: `3699` + // Minimum execution time: 19_512_000 picoseconds. + Weight::from_parts(21_129_000, 0) + .saturating_add(Weight::from_parts(0, 3699)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Crowdloan Funds (r:1 w:0) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: unknown `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) - /// Proof Skipped: unknown `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) + /// Storage: `Crowdloan::Funds` (r:1 w:0) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) fn add_memo() -> Weight { // Proof Size summary in bytes: // Measured: `412` // Estimated: `3877` - // Minimum execution time: 25_568_000 picoseconds. - Weight::from_parts(26_203_000, 0) + // Minimum execution time: 33_529_000 picoseconds. 
+ Weight::from_parts(37_082_000, 0) .saturating_add(Weight::from_parts(0, 3877)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Crowdloan Funds (r:1 w:0) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: Crowdloan NewRaise (r:1 w:1) - /// Proof Skipped: Crowdloan NewRaise (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Crowdloan::Funds` (r:1 w:0) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::NewRaise` (r:1 w:1) + /// Proof: `Crowdloan::NewRaise` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn poke() -> Weight { // Proof Size summary in bytes: - // Measured: `239` - // Estimated: `3704` - // Minimum execution time: 17_832_000 picoseconds. - Weight::from_parts(18_769_000, 0) - .saturating_add(Weight::from_parts(0, 3704)) + // Measured: `238` + // Estimated: `3703` + // Minimum execution time: 23_153_000 picoseconds. + Weight::from_parts(24_181_000, 0) + .saturating_add(Weight::from_parts(0, 3703)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Auctions AuctionInfo (r:1 w:0) - /// Proof: Auctions AuctionInfo (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Crowdloan EndingsCount (r:1 w:1) - /// Proof Skipped: Crowdloan EndingsCount (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Crowdloan NewRaise (r:1 w:1) - /// Proof Skipped: Crowdloan NewRaise (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Crowdloan Funds (r:100 w:0) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: Auctions AuctionCounter (r:1 w:0) - /// Proof: Auctions AuctionCounter (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Paras ParaLifecycles (r:100 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Slots Leases (r:100 w:0) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: Auctions Winning (r:1 w:1) - /// Proof: Auctions Winning (max_values: None, max_size: Some(1920), added: 4395, mode: MaxEncodedLen) - /// Storage: Auctions ReservedAmounts (r:100 w:100) - /// Proof: Auctions ReservedAmounts (max_values: None, max_size: Some(60), added: 2535, mode: MaxEncodedLen) - /// Storage: System Account (r:100 w:100) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Auctions::AuctionInfo` (r:1 w:0) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Crowdloan::EndingsCount` (r:1 w:1) + /// Proof: `Crowdloan::EndingsCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::NewRaise` (r:1 w:1) + /// Proof: `Crowdloan::NewRaise` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::Funds` (r:100 w:0) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Auctions::AuctionCounter` (r:1 w:0) + /// Proof: `Auctions::AuctionCounter` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Paras::ParaLifecycles` (r:100 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, 
`max_size`: None, mode: `Measured`) + /// Storage: `Slots::Leases` (r:100 w:0) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Auctions::Winning` (r:1 w:1) + /// Proof: `Auctions::Winning` (`max_values`: None, `max_size`: Some(1920), added: 4395, mode: `MaxEncodedLen`) + /// Storage: `Auctions::ReservedAmounts` (r:100 w:100) + /// Proof: `Auctions::ReservedAmounts` (`max_values`: None, `max_size`: Some(60), added: 2535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:100 w:100) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `n` is `[2, 100]`. fn on_initialize(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `197 + n * (356 ±0)` + // Measured: `229 + n * (356 ±0)` // Estimated: `5385 + n * (2832 ±0)` - // Minimum execution time: 128_319_000 picoseconds. - Weight::from_parts(130_877_000, 0) + // Minimum execution time: 120_164_000 picoseconds. + Weight::from_parts(3_390_119, 0) .saturating_add(Weight::from_parts(0, 5385)) - // Standard Error: 61_381 - .saturating_add(Weight::from_parts(60_209_202, 0).saturating_mul(n.into())) + // Standard Error: 41_727 + .saturating_add(Weight::from_parts(54_453_016, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(3)) diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs b/polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs index cec357453b67be400c0191ac7d5c12e6961a4bee..9b0cb98e6c01f8cb560ea8a99cd42cce9a04fb24 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs @@ -1,36 +1,43 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Polkadot. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . //! Autogenerated weights for `runtime_common::identity_migrator` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-07, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! 
DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `sbtb`, CPU: `13th Gen Intel(R) Core(TM) i7-1365U` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/release/polkadot +// ./target/production/polkadot // benchmark // pallet // --chain=rococo-dev -// --steps=2 -// --repeat=1 +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_common::identity_migrator // --extrinsic=* -// --output=./migrator-release.rs +// --execution=wasm +// --wasm-execution=compiled +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -44,7 +51,7 @@ use core::marker::PhantomData; pub struct WeightInfo(PhantomData); impl runtime_common::identity_migrator::WeightInfo for WeightInfo { /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `Identity::SubsOf` (r:1 w:1) /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) @@ -63,34 +70,34 @@ impl runtime_common::identity_migrator::WeightInfo for /// The range of component `s` is `[0, 100]`. fn reap_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `7292 + r * (8 ±0) + s * (32 ±0)` - // Estimated: `11003 + r * (8 ±0) + s * (33 ±0)` - // Minimum execution time: 163_756_000 picoseconds. - Weight::from_parts(158_982_500, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 1_143_629 - .saturating_add(Weight::from_parts(238_675, 0).saturating_mul(r.into())) - // Standard Error: 228_725 - .saturating_add(Weight::from_parts(1_529_645, 0).saturating_mul(s.into())) + // Measured: `7457 + r * (5 ±0) + s * (32 ±0)` + // Estimated: `11037 + r * (7 ±0) + s * (32 ±0)` + // Minimum execution time: 157_343_000 picoseconds. 
+ Weight::from_parts(159_289_236, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 16_439 + .saturating_add(Weight::from_parts(224_293, 0).saturating_mul(r.into())) + // Standard Error: 3_367 + .saturating_add(Weight::from_parts(1_383_637, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().writes(6)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) - .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) - .saturating_add(Weight::from_parts(0, 33).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(0, 7).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(s.into())) } /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Identity::SubsOf` (r:1 w:1) /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) fn poke_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `7229` - // Estimated: `11003` - // Minimum execution time: 137_570_000 picoseconds. - Weight::from_parts(137_570_000, 0) - .saturating_add(Weight::from_parts(0, 11003)) + // Measured: `7242` + // Estimated: `11037` + // Minimum execution time: 114_384_000 picoseconds. + Weight::from_parts(115_741_000, 0) + .saturating_add(Weight::from_parts(0, 11037)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_paras_registrar.rs b/polkadot/runtime/rococo/src/weights/runtime_common_paras_registrar.rs index 0a56562a1a95f24e9efddc6c26e16c4feeaf37c2..e066106e13450c78159994a808254b44cac43eff 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_common_paras_registrar.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_common_paras_registrar.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_common::paras_registrar` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_common::paras_registrar // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_common_paras_registrar.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_paras_registrar.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,175 +50,169 @@ use core::marker::PhantomData; /// Weight functions for `runtime_common::paras_registrar`. pub struct WeightInfo(PhantomData); impl runtime_common::paras_registrar::WeightInfo for WeightInfo { - /// Storage: Registrar NextFreeParaId (r:1 w:1) - /// Proof Skipped: Registrar NextFreeParaId (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Registrar Paras (r:1 w:1) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) + /// Storage: `Registrar::NextFreeParaId` (r:1 w:1) + /// Proof: `Registrar::NextFreeParaId` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:1) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) fn reserve() -> Weight { // Proof Size summary in bytes: - // Measured: `97` - // Estimated: `3562` - // Minimum execution time: 29_948_000 picoseconds. - Weight::from_parts(30_433_000, 0) - .saturating_add(Weight::from_parts(0, 3562)) + // Measured: `96` + // Estimated: `3561` + // Minimum execution time: 24_109_000 picoseconds. 
+ Weight::from_parts(24_922_000, 0) + .saturating_add(Weight::from_parts(0, 3561)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Registrar Paras (r:1 w:1) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:1) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CodeByHash (r:1 w:1) - /// Proof Skipped: Paras CodeByHash (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras CodeByHashRefs (r:1 w:1) - /// Proof Skipped: Paras CodeByHashRefs (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CurrentCodeHash (r:0 w:1) - /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpcomingParasGenesis (r:0 w:1) - /// Proof Skipped: Paras UpcomingParasGenesis (max_values: None, max_size: None, mode: Measured) + /// Storage: `Registrar::Paras` (r:1 w:1) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:1 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHashRefs` (r:1 w:1) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:0 w:1) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpcomingParasGenesis` (r:0 w:1) + /// Proof: `Paras::UpcomingParasGenesis` (`max_values`: None, `max_size`: None, mode: `Measured`) fn register() -> Weight { // Proof Size summary in bytes: - // Measured: `616` - // Estimated: `4081` - // Minimum execution time: 6_332_113_000 picoseconds. - Weight::from_parts(6_407_158_000, 0) - .saturating_add(Weight::from_parts(0, 4081)) - .saturating_add(T::DbWeight::get().reads(8)) + // Measured: `352` + // Estimated: `3817` + // Minimum execution time: 7_207_580_000 picoseconds. 
+ Weight::from_parts(7_298_567_000, 0) + .saturating_add(Weight::from_parts(0, 3817)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(8)) } - /// Storage: Registrar Paras (r:1 w:1) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:1) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CodeByHash (r:1 w:1) - /// Proof Skipped: Paras CodeByHash (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras CodeByHashRefs (r:1 w:1) - /// Proof Skipped: Paras CodeByHashRefs (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CurrentCodeHash (r:0 w:1) - /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpcomingParasGenesis (r:0 w:1) - /// Proof Skipped: Paras UpcomingParasGenesis (max_values: None, max_size: None, mode: Measured) + /// Storage: `Registrar::Paras` (r:1 w:1) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:1 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHashRefs` (r:1 w:1) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:0 w:1) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpcomingParasGenesis` (r:0 w:1) + /// Proof: `Paras::UpcomingParasGenesis` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_register() -> Weight { // Proof Size summary in bytes: - // Measured: `533` - // Estimated: `3998` - // Minimum execution time: 6_245_403_000 picoseconds. - Weight::from_parts(6_289_575_000, 0) - .saturating_add(Weight::from_parts(0, 3998)) - .saturating_add(T::DbWeight::get().reads(8)) + // Measured: `269` + // Estimated: `3734` + // Minimum execution time: 7_196_460_000 picoseconds. 
+ Weight::from_parts(7_385_729_000, 0)
+ .saturating_add(Weight::from_parts(0, 3734))
+ .saturating_add(T::DbWeight::get().reads(7))
 .saturating_add(T::DbWeight::get().writes(8))
 }
- /// Storage: Registrar Paras (r:1 w:1)
- /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured)
- /// Storage: Paras ParaLifecycles (r:1 w:1)
- /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured)
- /// Storage: Paras FutureCodeHash (r:1 w:0)
- /// Proof Skipped: Paras FutureCodeHash (max_values: None, max_size: None, mode: Measured)
- /// Storage: ParasShared CurrentSessionIndex (r:1 w:0)
- /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Paras ActionsQueue (r:1 w:1)
- /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured)
- /// Storage: MessageQueue BookStateFor (r:1 w:0)
- /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen)
- /// Storage: Registrar PendingSwap (r:0 w:1)
- /// Proof Skipped: Registrar PendingSwap (max_values: None, max_size: None, mode: Measured)
+ /// Storage: `Registrar::Paras` (r:1 w:1)
+ /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Paras::ParaLifecycles` (r:1 w:1)
+ /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Paras::FutureCodeHash` (r:1 w:0)
+ /// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0)
+ /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Paras::ActionsQueue` (r:1 w:1)
+ /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `MessageQueue::BookStateFor` (r:1 w:0)
+ /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`)
+ /// Storage: `Registrar::PendingSwap` (r:0 w:1)
+ /// Proof: `Registrar::PendingSwap` (`max_values`: None, `max_size`: None, mode: `Measured`)
 fn deregister() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `476`
- // Estimated: `3941`
- // Minimum execution time: 49_822_000 picoseconds.
- Weight::from_parts(50_604_000, 0)
- .saturating_add(Weight::from_parts(0, 3941))
+ // Measured: `499`
+ // Estimated: `3964`
+ // Minimum execution time: 54_761_000 picoseconds.
+ Weight::from_parts(57_931_000, 0)
+ .saturating_add(Weight::from_parts(0, 3964))
 .saturating_add(T::DbWeight::get().reads(6))
 .saturating_add(T::DbWeight::get().writes(4))
 }
- /// Storage: Registrar Paras (r:1 w:0)
- /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured)
- /// Storage: Paras ParaLifecycles (r:2 w:2)
- /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured)
- /// Storage: Registrar PendingSwap (r:1 w:1)
- /// Proof Skipped: Registrar PendingSwap (max_values: None, max_size: None, mode: Measured)
- /// Storage: ParasShared CurrentSessionIndex (r:1 w:0)
- /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Paras ActionsQueue (r:1 w:1)
- /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured)
- /// Storage: Crowdloan Funds (r:2 w:2)
- /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured)
- /// Storage: Slots Leases (r:2 w:2)
- /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured)
+ /// Storage: `Registrar::Paras` (r:1 w:0)
+ /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Paras::ParaLifecycles` (r:2 w:2)
+ /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Registrar::PendingSwap` (r:1 w:1)
+ /// Proof: `Registrar::PendingSwap` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0)
+ /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Paras::ActionsQueue` (r:1 w:1)
+ /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Crowdloan::Funds` (r:2 w:2)
+ /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Slots::Leases` (r:2 w:2)
+ /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`)
 fn swap() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `780`
- // Estimated: `6720`
- // Minimum execution time: 55_166_000 picoseconds.
- Weight::from_parts(56_913_000, 0)
- .saturating_add(Weight::from_parts(0, 6720))
+ // Measured: `837`
+ // Estimated: `6777`
+ // Minimum execution time: 59_564_000 picoseconds.
+ Weight::from_parts(62_910_000, 0)
+ .saturating_add(Weight::from_parts(0, 6777))
 .saturating_add(T::DbWeight::get().reads(10))
 .saturating_add(T::DbWeight::get().writes(8))
 }
- /// Storage: Paras FutureCodeHash (r:1 w:1)
- /// Proof Skipped: Paras FutureCodeHash (max_values: None, max_size: None, mode: Measured)
- /// Storage: Paras UpgradeRestrictionSignal (r:1 w:1)
- /// Proof Skipped: Paras UpgradeRestrictionSignal (max_values: None, max_size: None, mode: Measured)
- /// Storage: Configuration ActiveConfig (r:1 w:0)
- /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Paras CurrentCodeHash (r:1 w:0)
- /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured)
- /// Storage: Paras UpgradeCooldowns (r:1 w:1)
- /// Proof Skipped: Paras UpgradeCooldowns (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Paras PvfActiveVoteMap (r:1 w:1)
- /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured)
- /// Storage: Paras CodeByHash (r:1 w:1)
- /// Proof Skipped: Paras CodeByHash (max_values: None, max_size: None, mode: Measured)
- /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0)
- /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Paras PvfActiveVoteList (r:1 w:1)
- /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Paras CodeByHashRefs (r:1 w:1)
- /// Proof Skipped: Paras CodeByHashRefs (max_values: None, max_size: None, mode: Measured)
- /// The range of component `b` is `[1, 3145728]`.
+ /// Storage: `Paras::FutureCodeHash` (r:1 w:1)
+ /// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Paras::UpgradeRestrictionSignal` (r:1 w:1)
+ /// Proof: `Paras::UpgradeRestrictionSignal` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Paras::CurrentCodeHash` (r:1 w:0)
+ /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Paras::UpgradeCooldowns` (r:1 w:1)
+ /// Proof: `Paras::UpgradeCooldowns` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1)
+ /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Paras::CodeByHash` (r:1 w:1)
+ /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0)
+ /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1)
+ /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Paras::CodeByHashRefs` (r:1 w:1)
+ /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// The range of component `b` is `[9, 3145728]`.
 fn schedule_code_upgrade(b: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `464`
- // Estimated: `3929`
- // Minimum execution time: 43_650_000 picoseconds.
- Weight::from_parts(43_918_000, 0)
- .saturating_add(Weight::from_parts(0, 3929))
- // Standard Error: 6
- .saturating_add(Weight::from_parts(2_041, 0).saturating_mul(b.into()))
- .saturating_add(T::DbWeight::get().reads(10))
+ // Measured: `201`
+ // Estimated: `3666`
+ // Minimum execution time: 33_106_000 picoseconds.
+ Weight::from_parts(33_526_000, 0)
+ .saturating_add(Weight::from_parts(0, 3666))
+ // Standard Error: 2
+ .saturating_add(Weight::from_parts(2_334, 0).saturating_mul(b.into()))
+ .saturating_add(T::DbWeight::get().reads(9))
 .saturating_add(T::DbWeight::get().writes(7))
 }
- /// Storage: Paras Heads (r:0 w:1)
- /// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured)
+ /// Storage: `Paras::Heads` (r:0 w:1)
+ /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `b` is `[1, 1048576]`.
 fn set_current_head(b: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 8_666_000 picoseconds.
- Weight::from_parts(8_893_000, 0)
+ // Minimum execution time: 5_992_000 picoseconds.
+ Weight::from_parts(12_059_689, 0)
 .saturating_add(Weight::from_parts(0, 0))
- // Standard Error: 2
- .saturating_add(Weight::from_parts(855, 0).saturating_mul(b.into()))
+ // Standard Error: 0
+ .saturating_add(Weight::from_parts(959, 0).saturating_mul(b.into()))
 .saturating_add(T::DbWeight::get().writes(1))
 }
 }
diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_slots.rs b/polkadot/runtime/rococo/src/weights/runtime_common_slots.rs
index 23ab1ed3ee0efff9e95fd3526d022c9becad7b93..dd10dbbf1f112d51f0d1270663299baab7af2f59 100644
--- a/polkadot/runtime/rococo/src/weights/runtime_common_slots.rs
+++ b/polkadot/runtime/rococo/src/weights/runtime_common_slots.rs
@@ -16,11 +16,11 @@
 //! Autogenerated weights for `runtime_common::slots`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
 // Executed Command:
 // ./target/production/polkadot
@@ -29,12 +29,15 @@
 // --chain=rococo-dev
 // --steps=50
 // --repeat=20
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
 // --pallet=runtime_common::slots
 // --extrinsic=*
 // --execution=wasm
 // --wasm-execution=compiled
-// --header=./file_header.txt
-// --output=./runtime/rococo/src/weights/runtime_common_slots.rs
+// --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/rococo/src/weights/runtime_common_slots.rs
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -47,86 +50,82 @@ use core::marker::PhantomData;
 /// Weight functions for `runtime_common::slots`.
pub struct WeightInfo(PhantomData); impl runtime_common::slots::WeightInfo for WeightInfo { - /// Storage: Slots Leases (r:1 w:1) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Slots::Leases` (r:1 w:1) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_lease() -> Weight { // Proof Size summary in bytes: - // Measured: `287` - // Estimated: `3752` - // Minimum execution time: 29_932_000 picoseconds. - Weight::from_parts(30_334_000, 0) - .saturating_add(Weight::from_parts(0, 3752)) + // Measured: `320` + // Estimated: `3785` + // Minimum execution time: 26_570_000 picoseconds. + Weight::from_parts(27_619_000, 0) + .saturating_add(Weight::from_parts(0, 3785)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Paras Parachains (r:1 w:0) - /// Proof Skipped: Paras Parachains (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Slots Leases (r:101 w:100) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:200 w:200) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) - /// Storage: Registrar Paras (r:100 w:100) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::Parachains` (r:1 w:0) + /// Proof: `Paras::Parachains` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Slots::Leases` (r:101 w:100) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:200 w:200) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[0, 100]`. /// The range of component `t` is `[0, 100]`. fn manage_lease_period_start(c: u32, t: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `26 + c * (47 ±0) + t * (308 ±0)` - // Estimated: `2800 + c * (2526 ±0) + t * (2789 ±0)` - // Minimum execution time: 634_547_000 picoseconds. - Weight::from_parts(643_045_000, 0) - .saturating_add(Weight::from_parts(0, 2800)) - // Standard Error: 81_521 - .saturating_add(Weight::from_parts(2_705_219, 0).saturating_mul(c.into())) - // Standard Error: 81_521 - .saturating_add(Weight::from_parts(11_464_132, 0).saturating_mul(t.into())) + // Measured: `594 + c * (20 ±0) + t * (234 ±0)` + // Estimated: `4065 + c * (2496 ±0) + t * (2709 ±0)` + // Minimum execution time: 729_793_000 picoseconds. 
+ Weight::from_parts(740_820_000, 0) + .saturating_add(Weight::from_parts(0, 4065)) + // Standard Error: 88_206 + .saturating_add(Weight::from_parts(2_793_142, 0).saturating_mul(c.into())) + // Standard Error: 88_206 + .saturating_add(Weight::from_parts(8_933_065, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(t.into()))) + .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 2526).saturating_mul(c.into())) - .saturating_add(Weight::from_parts(0, 2789).saturating_mul(t.into())) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(t.into()))) + .saturating_add(Weight::from_parts(0, 2496).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(0, 2709).saturating_mul(t.into())) } - /// Storage: Slots Leases (r:1 w:1) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: System Account (r:8 w:8) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Slots::Leases` (r:1 w:1) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:8 w:8) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn clear_all_leases() -> Weight { // Proof Size summary in bytes: - // Measured: `2759` + // Measured: `2792` // Estimated: `21814` - // Minimum execution time: 129_756_000 picoseconds. - Weight::from_parts(131_810_000, 0) + // Minimum execution time: 123_888_000 picoseconds. 
+ Weight::from_parts(131_245_000, 0) .saturating_add(Weight::from_parts(0, 21814)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(9)) } - /// Storage: Slots Leases (r:1 w:0) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:1) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) - /// Storage: Registrar Paras (r:1 w:1) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) + /// Storage: `Slots::Leases` (r:1 w:0) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) fn trigger_onboard() -> Weight { // Proof Size summary in bytes: - // Measured: `707` - // Estimated: `4172` - // Minimum execution time: 29_527_000 picoseconds. - Weight::from_parts(30_055_000, 0) - .saturating_add(Weight::from_parts(0, 4172)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(3)) + // Measured: `612` + // Estimated: `4077` + // Minimum execution time: 27_341_000 picoseconds. + Weight::from_parts(28_697_000, 0) + .saturating_add(Weight::from_parts(0, 4077)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) } } diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs index ac0f05301b486dbdbb8c0ca004e195ab47171ff3..653e1009f31c3285b7de50fe9b7071d0e4050394 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs @@ -16,26 +16,28 @@ //! Autogenerated weights for `runtime_parachains::assigner_on_demand` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-fljshgub-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=runtime_parachains::assigner_on_demand // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json -// --pallet=runtime_parachains::assigner_on_demand -// --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -57,13 +59,13 @@ impl runtime_parachains::assigner_on_demand::WeightInfo /// The range of component `s` is `[1, 9999]`. fn place_order_keep_alive(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `297 + s * (4 ±0)` - // Estimated: `3762 + s * (4 ±0)` - // Minimum execution time: 33_522_000 picoseconds. - Weight::from_parts(35_436_835, 0) - .saturating_add(Weight::from_parts(0, 3762)) - // Standard Error: 129 - .saturating_add(Weight::from_parts(14_041, 0).saturating_mul(s.into())) + // Measured: `363 + s * (4 ±0)` + // Estimated: `3828 + s * (4 ±0)` + // Minimum execution time: 25_298_000 picoseconds. + Weight::from_parts(21_486_098, 0) + .saturating_add(Weight::from_parts(0, 3828)) + // Standard Error: 136 + .saturating_add(Weight::from_parts(13_943, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) @@ -77,13 +79,13 @@ impl runtime_parachains::assigner_on_demand::WeightInfo /// The range of component `s` is `[1, 9999]`. fn place_order_allow_death(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `297 + s * (4 ±0)` - // Estimated: `3762 + s * (4 ±0)` - // Minimum execution time: 33_488_000 picoseconds. - Weight::from_parts(34_848_934, 0) - .saturating_add(Weight::from_parts(0, 3762)) - // Standard Error: 143 - .saturating_add(Weight::from_parts(14_215, 0).saturating_mul(s.into())) + // Measured: `363 + s * (4 ±0)` + // Estimated: `3828 + s * (4 ±0)` + // Minimum execution time: 25_421_000 picoseconds. + Weight::from_parts(21_828_043, 0) + .saturating_add(Weight::from_parts(0, 3828)) + // Standard Error: 133 + .saturating_add(Weight::from_parts(13_831, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs index 34541b83597e6284a401a171e703b200366114a0..caad090ae15bf7d7d79a696d6a178cffeeca1507 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs @@ -16,26 +16,28 @@ //! Autogenerated weights for `runtime_parachains::configuration` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=runtime_parachains::configuration // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=runtime_parachains::configuration -// --chain=rococo-dev // --header=./polkadot/file_header.txt -// --output=./polkadot/runtime/rococo/src/weights/ +// --output=./polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -58,8 +60,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_793_000 picoseconds. - Weight::from_parts(8_192_000, 0) + // Minimum execution time: 7_689_000 picoseconds. + Weight::from_parts(8_089_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -74,8 +76,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_819_000 picoseconds. - Weight::from_parts(8_004_000, 0) + // Minimum execution time: 7_735_000 picoseconds. + Weight::from_parts(8_150_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -90,8 +92,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_760_000 picoseconds. - Weight::from_parts(8_174_000, 0) + // Minimum execution time: 7_902_000 picoseconds. + Weight::from_parts(8_196_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -116,8 +118,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_814_000 picoseconds. - Weight::from_parts(8_098_000, 0) + // Minimum execution time: 7_634_000 picoseconds. + Weight::from_parts(7_983_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -132,8 +134,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 10_028_000 picoseconds. - Weight::from_parts(10_386_000, 0) + // Minimum execution time: 9_580_000 picoseconds. 
+ Weight::from_parts(9_989_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -148,8 +150,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_867_000 picoseconds. - Weight::from_parts(8_191_000, 0) + // Minimum execution time: 7_787_000 picoseconds. + Weight::from_parts(8_008_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -164,8 +166,24 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 10_158_000 picoseconds. - Weight::from_parts(10_430_000, 0) + // Minimum execution time: 9_557_000 picoseconds. + Weight::from_parts(9_994_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_config_with_scheduler_params() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_775_000 picoseconds. + Weight::from_parts(7_989_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_disputes.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_disputes.rs index 63a8c3addc7da4296041627ee8492945945bce7f..cf1aa36e10e704ab0074730896f6b5d2f9a4fedf 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_disputes.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_disputes.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_parachains::disputes` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_parachains::disputes // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_parachains_disputes.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_parachains_disputes.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,14 +50,14 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::disputes`. pub struct WeightInfo(PhantomData); impl runtime_parachains::disputes::WeightInfo for WeightInfo { - /// Storage: ParasDisputes Frozen (r:0 w:1) - /// Proof Skipped: ParasDisputes Frozen (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `ParasDisputes::Frozen` (r:0 w:1) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn force_unfreeze() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_937_000 picoseconds. - Weight::from_parts(3_082_000, 0) + // Minimum execution time: 1_855_000 picoseconds. + Weight::from_parts(2_015_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs index 417820e6627f6e39c4107df722f4b83f9cca6a4b..a3b912491e5ec56c340b3dfd3b46e95f39f57e3c 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_parachains::hrmp` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_parachains::hrmp // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_parachains_hrmp.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,105 +50,97 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::hrmp`. 
pub struct WeightInfo(PhantomData); impl runtime_parachains::hrmp::WeightInfo for WeightInfo { - /// Storage: Paras ParaLifecycles (r:2 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannels (r:1 w:0) - /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpEgressChannelsIndex (r:1 w:0) - /// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequestCount (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannels` (r:1 w:0) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:1 w:0) + /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn hrmp_init_open_channel() -> Weight { // Proof Size summary in bytes: - // Measured: `704` - // Estimated: `6644` - // Minimum execution time: 41_564_000 picoseconds. - Weight::from_parts(42_048_000, 0) - .saturating_add(Weight::from_parts(0, 6644)) - .saturating_add(T::DbWeight::get().reads(10)) + // Measured: `488` + // Estimated: `3953` + // Minimum execution time: 34_911_000 picoseconds. 
+ Weight::from_parts(35_762_000, 0) + .saturating_add(Weight::from_parts(0, 3953)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpIngressChannelsIndex (r:1 w:0) - /// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpAcceptedChannelRequestCount (r:1 w:1) - /// Proof Skipped: Hrmp HrmpAcceptedChannelRequestCount (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:1 w:0) + /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn hrmp_accept_open_channel() -> Weight { // Proof Size summary in bytes: - // Measured: `936` - // Estimated: `4401` - // Minimum execution time: 43_570_000 picoseconds. - Weight::from_parts(44_089_000, 0) - .saturating_add(Weight::from_parts(0, 4401)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `478` + // Estimated: `3943` + // Minimum execution time: 31_483_000 picoseconds. 
+ Weight::from_parts(32_230_000, 0) + .saturating_add(Weight::from_parts(0, 3943)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Hrmp HrmpChannels (r:1 w:0) - /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpCloseChannelRequests (r:1 w:1) - /// Proof Skipped: Hrmp HrmpCloseChannelRequests (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpCloseChannelRequestsList (r:1 w:1) - /// Proof Skipped: Hrmp HrmpCloseChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Hrmp::HrmpChannels` (r:1 w:0) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpCloseChannelRequests` (r:1 w:1) + /// Proof: `Hrmp::HrmpCloseChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpCloseChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpCloseChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn hrmp_close_channel() -> Weight { // Proof Size summary in bytes: - // Measured: `807` - // Estimated: `4272` - // Minimum execution time: 36_594_000 picoseconds. - Weight::from_parts(37_090_000, 0) - .saturating_add(Weight::from_parts(0, 4272)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `591` + // Estimated: `4056` + // Minimum execution time: 32_153_000 picoseconds. 
+ Weight::from_parts(32_982_000, 0) + .saturating_add(Weight::from_parts(0, 4056)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Hrmp HrmpIngressChannelsIndex (r:128 w:128) - /// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpEgressChannelsIndex (r:128 w:128) - /// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannels (r:254 w:254) - /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpAcceptedChannelRequestCount (r:0 w:1) - /// Proof Skipped: Hrmp HrmpAcceptedChannelRequestCount (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannelContents (r:0 w:254) - /// Proof Skipped: Hrmp HrmpChannelContents (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequestCount (r:0 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured) + /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:128 w:128) + /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:128 w:128) + /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannels` (r:254 w:254) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:0 w:1) + /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelContents` (r:0 w:254) + /// Proof: `Hrmp::HrmpChannelContents` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:0 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 127]`. /// The range of component `e` is `[0, 127]`. fn force_clean_hrmp(i: u32, e: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `264 + e * (100 ±0) + i * (100 ±0)` - // Estimated: `3726 + e * (2575 ±0) + i * (2575 ±0)` - // Minimum execution time: 1_085_140_000 picoseconds. - Weight::from_parts(1_100_901_000, 0) - .saturating_add(Weight::from_parts(0, 3726)) - // Standard Error: 98_982 - .saturating_add(Weight::from_parts(3_229_112, 0).saturating_mul(i.into())) - // Standard Error: 98_982 - .saturating_add(Weight::from_parts(3_210_944, 0).saturating_mul(e.into())) + // Measured: `297 + e * (100 ±0) + i * (100 ±0)` + // Estimated: `3759 + e * (2575 ±0) + i * (2575 ±0)` + // Minimum execution time: 1_240_769_000 picoseconds. 
+ Weight::from_parts(1_249_285_000, 0) + .saturating_add(Weight::from_parts(0, 3759)) + // Standard Error: 112_346 + .saturating_add(Weight::from_parts(3_449_114, 0).saturating_mul(i.into())) + // Standard Error: 112_346 + .saturating_add(Weight::from_parts(3_569_184, 0).saturating_mul(e.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(i.into()))) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(e.into()))) @@ -155,139 +150,139 @@ impl runtime_parachains::hrmp::WeightInfo for WeightInf .saturating_add(Weight::from_parts(0, 2575).saturating_mul(e.into())) .saturating_add(Weight::from_parts(0, 2575).saturating_mul(i.into())) } - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequests (r:128 w:128) - /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:256 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpIngressChannelsIndex (r:128 w:128) - /// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpEgressChannelsIndex (r:128 w:128) - /// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequestCount (r:128 w:128) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpAcceptedChannelRequestCount (r:128 w:128) - /// Proof Skipped: Hrmp HrmpAcceptedChannelRequestCount (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannels (r:0 w:128) - /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured) + /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:128 w:128) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:256 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:128 w:128) + /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:128 w:128) + /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:128 w:128) + /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:128 w:128) + /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannels` (r:0 w:128) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[0, 128]`. 
fn force_process_hrmp_open(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `779 + c * (136 ±0)` - // Estimated: `2234 + c * (5086 ±0)` - // Minimum execution time: 10_497_000 picoseconds. - Weight::from_parts(6_987_455, 0) - .saturating_add(Weight::from_parts(0, 2234)) - // Standard Error: 18_540 - .saturating_add(Weight::from_parts(18_788_534, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) + // Measured: `525 + c * (136 ±0)` + // Estimated: `1980 + c * (5086 ±0)` + // Minimum execution time: 6_026_000 picoseconds. + Weight::from_parts(6_257_000, 0) + .saturating_add(Weight::from_parts(0, 1980)) + // Standard Error: 9_732 + .saturating_add(Weight::from_parts(21_049_890, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().reads((7_u64).saturating_mul(c.into()))) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(T::DbWeight::get().writes((6_u64).saturating_mul(c.into()))) .saturating_add(Weight::from_parts(0, 5086).saturating_mul(c.into())) } - /// Storage: Hrmp HrmpCloseChannelRequestsList (r:1 w:1) - /// Proof Skipped: Hrmp HrmpCloseChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannels (r:128 w:128) - /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpEgressChannelsIndex (r:128 w:128) - /// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpIngressChannelsIndex (r:128 w:128) - /// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpCloseChannelRequests (r:0 w:128) - /// Proof Skipped: Hrmp HrmpCloseChannelRequests (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannelContents (r:0 w:128) - /// Proof Skipped: Hrmp HrmpChannelContents (max_values: None, max_size: None, mode: Measured) + /// Storage: `Hrmp::HrmpCloseChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpCloseChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannels` (r:128 w:128) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:128 w:128) + /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:128 w:128) + /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpCloseChannelRequests` (r:0 w:128) + /// Proof: `Hrmp::HrmpCloseChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelContents` (r:0 w:128) + /// Proof: `Hrmp::HrmpChannelContents` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[0, 128]`. fn force_process_hrmp_close(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `335 + c * (124 ±0)` - // Estimated: `1795 + c * (2600 ±0)` - // Minimum execution time: 6_575_000 picoseconds. - Weight::from_parts(1_228_642, 0) - .saturating_add(Weight::from_parts(0, 1795)) - // Standard Error: 14_826 - .saturating_add(Weight::from_parts(11_604_038, 0).saturating_mul(c.into())) + // Measured: `368 + c * (124 ±0)` + // Estimated: `1828 + c * (2600 ±0)` + // Minimum execution time: 4_991_000 picoseconds. 
+ Weight::from_parts(984_758, 0) + .saturating_add(Weight::from_parts(0, 1828)) + // Standard Error: 11_918 + .saturating_add(Weight::from_parts(13_018_813, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(c.into()))) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(T::DbWeight::get().writes((5_u64).saturating_mul(c.into()))) .saturating_add(Weight::from_parts(0, 2600).saturating_mul(c.into())) } - /// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequestCount (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured) + /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[0, 128]`. fn hrmp_cancel_open_request(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1026 + c * (13 ±0)` - // Estimated: `4295 + c * (15 ±0)` - // Minimum execution time: 22_301_000 picoseconds. - Weight::from_parts(26_131_473, 0) - .saturating_add(Weight::from_parts(0, 4295)) - // Standard Error: 830 - .saturating_add(Weight::from_parts(49_448, 0).saturating_mul(c.into())) + // Measured: `1059 + c * (13 ±0)` + // Estimated: `4328 + c * (15 ±0)` + // Minimum execution time: 17_299_000 picoseconds. + Weight::from_parts(27_621_478, 0) + .saturating_add(Weight::from_parts(0, 4328)) + // Standard Error: 2_527 + .saturating_add(Weight::from_parts(121_149, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) .saturating_add(Weight::from_parts(0, 15).saturating_mul(c.into())) } - /// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequests (r:128 w:128) - /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured) + /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:128 w:128) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[0, 128]`. fn clean_open_channel_requests(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `243 + c * (63 ±0)` - // Estimated: `1722 + c * (2538 ±0)` - // Minimum execution time: 5_234_000 picoseconds. 
- Weight::from_parts(7_350_270, 0) - .saturating_add(Weight::from_parts(0, 1722)) - // Standard Error: 3_105 - .saturating_add(Weight::from_parts(2_981_935, 0).saturating_mul(c.into())) + // Measured: `276 + c * (63 ±0)` + // Estimated: `1755 + c * (2538 ±0)` + // Minimum execution time: 3_764_000 picoseconds. + Weight::from_parts(5_935_301, 0) + .saturating_add(Weight::from_parts(0, 1755)) + // Standard Error: 3_761 + .saturating_add(Weight::from_parts(3_290_277, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) .saturating_add(Weight::from_parts(0, 2538).saturating_mul(c.into())) } - /// Storage: Paras ParaLifecycles (r:2 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannels (r:1 w:0) - /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpEgressChannelsIndex (r:1 w:0) - /// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequestCount (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:2 w:2) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:2 w:2) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpIngressChannelsIndex (r:1 w:0) - /// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpAcceptedChannelRequestCount (r:1 w:1) - /// Proof Skipped: Hrmp HrmpAcceptedChannelRequestCount (max_values: None, max_size: None, mode: Measured) - fn force_open_hrmp_channel(_c: u32, ) -> Weight { + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannels` (r:1 w:0) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:1 w:0) + /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:2 w:2) 
+ /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:2 w:2) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:1 w:0) + /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `c` is `[0, 1]`. + fn force_open_hrmp_channel(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `704` - // Estimated: `6644` - // Minimum execution time: 55_611_000 picoseconds. - Weight::from_parts(56_488_000, 0) - .saturating_add(Weight::from_parts(0, 6644)) - .saturating_add(T::DbWeight::get().reads(14)) + // Measured: `488 + c * (235 ±0)` + // Estimated: `6428 + c * (235 ±0)` + // Minimum execution time: 49_506_000 picoseconds. + Weight::from_parts(51_253_075, 0) + .saturating_add(Weight::from_parts(0, 6428)) + // Standard Error: 144_082 + .saturating_add(Weight::from_parts(12_862_224, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(8)) + .saturating_add(Weight::from_parts(0, 235).saturating_mul(c.into())) } /// Storage: `Paras::ParaLifecycles` (r:1 w:0) /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -311,11 +306,11 @@ impl runtime_parachains::hrmp::WeightInfo for WeightInf /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) fn establish_system_channel() -> Weight { // Proof Size summary in bytes: - // Measured: `417` - // Estimated: `6357` - // Minimum execution time: 629_674_000 picoseconds. - Weight::from_parts(640_174_000, 0) - .saturating_add(Weight::from_parts(0, 6357)) + // Measured: `488` + // Estimated: `6428` + // Minimum execution time: 50_016_000 picoseconds. + Weight::from_parts(50_933_000, 0) + .saturating_add(Weight::from_parts(0, 6428)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(8)) } @@ -323,11 +318,11 @@ impl runtime_parachains::hrmp::WeightInfo for WeightInf /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) fn poke_channel_deposits() -> Weight { // Proof Size summary in bytes: - // Measured: `263` - // Estimated: `3728` - // Minimum execution time: 173_371_000 picoseconds. - Weight::from_parts(175_860_000, 0) - .saturating_add(Weight::from_parts(0, 3728)) + // Measured: `296` + // Estimated: `3761` + // Minimum execution time: 12_280_000 picoseconds. + Weight::from_parts(12_863_000, 0) + .saturating_add(Weight::from_parts(0, 3761)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_inclusion.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_inclusion.rs index a121ad774cefecf91e57838e03107fde22674f94..b9ec7565bd555d1b5b06fd9db01e7ca2ee765187 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_inclusion.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_inclusion.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_parachains::inclusion` //! -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_parachains::inclusion // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_parachains_inclusion.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_parachains_inclusion.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,27 +50,25 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::inclusion`. pub struct WeightInfo(PhantomData); impl runtime_parachains::inclusion::WeightInfo for WeightInfo { - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:999) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(32818), added: 35293, mode: MaxEncodedLen) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Proof Skipped: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Storage: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) - /// Proof Skipped: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:999) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(32818), added: 35293, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) /// The range of component `i` is `[1, 1000]`. fn receive_upward_messages(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `33280` + // Measured: `32993` // Estimated: `36283` - // Minimum execution time: 71_094_000 picoseconds. - Weight::from_parts(71_436_000, 0) + // Minimum execution time: 72_675_000 picoseconds. 
+ Weight::from_parts(73_290_000, 0) .saturating_add(Weight::from_parts(0, 36283)) - // Standard Error: 22_149 - .saturating_add(Weight::from_parts(51_495_472, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(3)) + // Standard Error: 16_067 + .saturating_add(Weight::from_parts(57_735_739, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) } diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_initializer.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_initializer.rs index 5c627507dfb682a603152fee2155b620773dcd49..e8c554610c999dd93bbc6cb10025933438adc968 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_initializer.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_initializer.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_parachains::initializer` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_parachains::initializer // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_parachains_initializer.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_parachains_initializer.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,18 +50,18 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::initializer`. pub struct WeightInfo(PhantomData); impl runtime_parachains::initializer::WeightInfo for WeightInfo { - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `d` is `[0, 65536]`. fn force_approve(d: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + d * (11 ±0)` // Estimated: `1480 + d * (11 ±0)` - // Minimum execution time: 3_771_000 picoseconds. - Weight::from_parts(6_491_437, 0) + // Minimum execution time: 2_634_000 picoseconds. 
+ Weight::from_parts(2_728_000, 0) .saturating_add(Weight::from_parts(0, 1480)) - // Standard Error: 9 - .saturating_add(Weight::from_parts(1_356, 0).saturating_mul(d.into())) + // Standard Error: 19 + .saturating_add(Weight::from_parts(2_499, 0).saturating_mul(d.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(Weight::from_parts(0, 11).saturating_mul(d.into())) diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_paras.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_paras.rs index dfd95006dc7d0a6cedeeab9908d99a2c1c7e561a..af26bfc9ae9b25f5a3a50938ceb1b978646c65eb 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_paras.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_paras.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_parachains::paras` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_parachains::paras // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_parachains_paras.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_parachains_paras.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,247 +50,248 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::paras`. 
pub struct WeightInfo(PhantomData); impl runtime_parachains::paras::WeightInfo for WeightInfo { - /// Storage: Paras CurrentCodeHash (r:1 w:1) - /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CodeByHashRefs (r:1 w:1) - /// Proof Skipped: Paras CodeByHashRefs (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras PastCodeMeta (r:1 w:1) - /// Proof Skipped: Paras PastCodeMeta (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras PastCodePruning (r:1 w:1) - /// Proof Skipped: Paras PastCodePruning (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PastCodeHash (r:0 w:1) - /// Proof Skipped: Paras PastCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CodeByHash (r:0 w:1) - /// Proof Skipped: Paras CodeByHash (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:1) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHashRefs` (r:1 w:1) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PastCodeMeta` (r:1 w:1) + /// Proof: `Paras::PastCodeMeta` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PastCodePruning` (r:1 w:1) + /// Proof: `Paras::PastCodePruning` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PastCodeHash` (r:0 w:1) + /// Proof: `Paras::PastCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:0 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[1, 3145728]`. fn force_set_current_code(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `8309` // Estimated: `11774` - // Minimum execution time: 31_941_000 picoseconds. - Weight::from_parts(32_139_000, 0) + // Minimum execution time: 27_488_000 picoseconds. + Weight::from_parts(27_810_000, 0) .saturating_add(Weight::from_parts(0, 11774)) - // Standard Error: 5 - .saturating_add(Weight::from_parts(2_011, 0).saturating_mul(c.into())) + // Standard Error: 8 + .saturating_add(Weight::from_parts(2_189, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Paras Heads (r:0 w:1) - /// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `s` is `[1, 1048576]`. fn force_set_current_head(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_275_000 picoseconds. - Weight::from_parts(8_321_000, 0) + // Minimum execution time: 5_793_000 picoseconds. 
+ Weight::from_parts(7_987_606, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2 - .saturating_add(Weight::from_parts(858, 0).saturating_mul(s.into())) + // Standard Error: 1 + .saturating_add(Weight::from_parts(971, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().writes(1)) } - // Storage: Paras Heads (r:0 w:1) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_set_most_recent_context() -> Weight { - Weight::from_parts(10_155_000, 0) - // Standard Error: 0 - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_733_000 picoseconds. + Weight::from_parts(2_954_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras FutureCodeHash (r:1 w:1) - /// Proof Skipped: Paras FutureCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CurrentCodeHash (r:1 w:0) - /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeCooldowns (r:1 w:1) - /// Proof Skipped: Paras UpgradeCooldowns (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CodeByHash (r:1 w:1) - /// Proof Skipped: Paras CodeByHash (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras CodeByHashRefs (r:1 w:1) - /// Proof Skipped: Paras CodeByHashRefs (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeRestrictionSignal (r:0 w:1) - /// Proof Skipped: Paras UpgradeRestrictionSignal (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::FutureCodeHash` (r:1 w:1) + /// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeCooldowns` (r:1 w:1) + /// Proof: `Paras::UpgradeCooldowns` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:1 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHashRefs` (r:1 w:1) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: 
`Measured`) + /// Storage: `Paras::UpgradeRestrictionSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeRestrictionSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[1, 3145728]`. fn force_schedule_code_upgrade(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `8715` - // Estimated: `12180` - // Minimum execution time: 49_923_000 picoseconds. - Weight::from_parts(50_688_000, 0) - .saturating_add(Weight::from_parts(0, 12180)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_976, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(7)) + // Measured: `8452` + // Estimated: `11917` + // Minimum execution time: 6_072_000 picoseconds. + Weight::from_parts(6_128_000, 0) + .saturating_add(Weight::from_parts(0, 11917)) + // Standard Error: 2 + .saturating_add(Weight::from_parts(2_334, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Paras FutureCodeUpgrades (r:1 w:0) - /// Proof Skipped: Paras FutureCodeUpgrades (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Heads (r:0 w:1) - /// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeGoAheadSignal (r:0 w:1) - /// Proof Skipped: Paras UpgradeGoAheadSignal (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `s` is `[1, 1048576]`. fn force_note_new_head(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `95` - // Estimated: `3560` - // Minimum execution time: 14_408_000 picoseconds. - Weight::from_parts(14_647_000, 0) - .saturating_add(Weight::from_parts(0, 3560)) - // Standard Error: 2 - .saturating_add(Weight::from_parts(858, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `163` + // Estimated: `3628` + // Minimum execution time: 15_166_000 picoseconds. 
+ Weight::from_parts(21_398_053, 0) + .saturating_add(Weight::from_parts(0, 3628)) + // Standard Error: 1 + .saturating_add(Weight::from_parts(976, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_queue_action() -> Weight { // Proof Size summary in bytes: - // Measured: `4288` - // Estimated: `7753` - // Minimum execution time: 20_009_000 picoseconds. - Weight::from_parts(20_518_000, 0) - .saturating_add(Weight::from_parts(0, 7753)) + // Measured: `4312` + // Estimated: `7777` + // Minimum execution time: 16_345_000 picoseconds. + Weight::from_parts(16_712_000, 0) + .saturating_add(Weight::from_parts(0, 7777)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[1, 3145728]`. fn add_trusted_validation_code(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `946` - // Estimated: `4411` - // Minimum execution time: 80_626_000 picoseconds. - Weight::from_parts(52_721_755, 0) - .saturating_add(Weight::from_parts(0, 4411)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_443, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `683` + // Estimated: `4148` + // Minimum execution time: 78_076_000 picoseconds. 
+ Weight::from_parts(123_193_814, 0) + .saturating_add(Weight::from_parts(0, 4148)) + // Standard Error: 5 + .saturating_add(Weight::from_parts(1_770, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Paras CodeByHashRefs (r:1 w:0) - /// Proof Skipped: Paras CodeByHashRefs (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CodeByHash (r:0 w:1) - /// Proof Skipped: Paras CodeByHash (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::CodeByHashRefs` (r:1 w:0) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:0 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) fn poke_unused_validation_code() -> Weight { // Proof Size summary in bytes: // Measured: `28` // Estimated: `3493` - // Minimum execution time: 6_692_000 picoseconds. - Weight::from_parts(7_009_000, 0) + // Minimum execution time: 5_184_000 picoseconds. + Weight::from_parts(5_430_000, 0) .saturating_add(Weight::from_parts(0, 3493)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) fn include_pvf_check_statement() -> Weight { // Proof Size summary in bytes: - // Measured: `26682` - // Estimated: `30147` - // Minimum execution time: 87_994_000 picoseconds. - Weight::from_parts(89_933_000, 0) - .saturating_add(Weight::from_parts(0, 30147)) + // Measured: `26706` + // Estimated: `30171` + // Minimum execution time: 102_995_000 picoseconds. 
+ Weight::from_parts(108_977_000, 0) + .saturating_add(Weight::from_parts(0, 30171)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras UpcomingUpgrades (r:1 w:1) - /// Proof Skipped: Paras UpcomingUpgrades (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras FutureCodeUpgrades (r:0 w:100) - /// Proof Skipped: Paras FutureCodeUpgrades (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpcomingUpgrades` (r:1 w:1) + /// Proof: `Paras::UpcomingUpgrades` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:0 w:100) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) fn include_pvf_check_statement_finalize_upgrade_accept() -> Weight { // Proof Size summary in bytes: - // Measured: `27523` - // Estimated: `30988` - // Minimum execution time: 783_222_000 picoseconds. - Weight::from_parts(794_959_000, 0) - .saturating_add(Weight::from_parts(0, 30988)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `27360` + // Estimated: `30825` + // Minimum execution time: 709_433_000 picoseconds. 
+ Weight::from_parts(725_074_000, 0) + .saturating_add(Weight::from_parts(0, 30825)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(104)) } - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) fn include_pvf_check_statement_finalize_upgrade_reject() -> Weight { // Proof Size summary in bytes: - // Measured: `27214` - // Estimated: `30679` - // Minimum execution time: 87_424_000 picoseconds. - Weight::from_parts(88_737_000, 0) - .saturating_add(Weight::from_parts(0, 30679)) + // Measured: `27338` + // Estimated: `30803` + // Minimum execution time: 98_973_000 picoseconds. + Weight::from_parts(104_715_000, 0) + .saturating_add(Weight::from_parts(0, 30803)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) fn include_pvf_check_statement_finalize_onboarding_accept() -> Weight { // Proof Size summary in bytes: - // Measured: `26991` - // Estimated: `30456` - // Minimum execution 
time: 612_485_000 picoseconds. - Weight::from_parts(621_670_000, 0) - .saturating_add(Weight::from_parts(0, 30456)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `26728` + // Estimated: `30193` + // Minimum execution time: 550_958_000 picoseconds. + Weight::from_parts(564_497_000, 0) + .saturating_add(Weight::from_parts(0, 30193)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) fn include_pvf_check_statement_finalize_onboarding_reject() -> Weight { // Proof Size summary in bytes: - // Measured: `26682` - // Estimated: `30147` - // Minimum execution time: 86_673_000 picoseconds. - Weight::from_parts(87_424_000, 0) - .saturating_add(Weight::from_parts(0, 30147)) + // Measured: `26706` + // Estimated: `30171` + // Minimum execution time: 97_088_000 picoseconds. + Weight::from_parts(103_617_000, 0) + .saturating_add(Weight::from_parts(0, 30171)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs index a102d1903b2f2b4a757edf3bb96809c982f31383..374927f8470de51d6c8c9ab4a0818f03fe849e97 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs @@ -13,161 +13,334 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . + //! Autogenerated weights for `runtime_parachains::paras_inherent` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2021-11-20, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 128 +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/release/polkadot +// ./target/production/polkadot // benchmark +// pallet // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_parachains::paras_inherent // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs -// --header=./file_header.txt +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions for `runtime_parachains::paras_inherent`. pub struct WeightInfo(PhantomData); impl runtime_parachains::paras_inherent::WeightInfo for WeightInfo { - // Storage: ParaInherent Included (r:1 w:1) - // Storage: System ParentHash (r:1 w:0) - // Storage: ParaScheduler AvailabilityCores (r:1 w:1) - // Storage: ParasShared CurrentSessionIndex (r:1 w:0) - // Storage: Configuration ActiveConfig (r:1 w:0) - // Storage: ParaSessionInfo Sessions (r:1 w:0) - // Storage: ParasDisputes Disputes (r:1 w:1) - // Storage: ParasDisputes Included (r:1 w:1) - // Storage: ParasDisputes SpamSlots (r:1 w:1) - // Storage: ParasDisputes Frozen (r:1 w:0) - // Storage: ParaInclusion PendingAvailability (r:2 w:1) - // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - // Storage: Paras Parachains (r:1 w:0) - // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - // Storage: Dmp DownwardMessageQueues (r:1 w:1) - // Storage: Hrmp HrmpChannelDigests (r:1 w:1) - // Storage: Paras FutureCodeUpgrades (r:1 w:0) - // Storage: ParaScheduler SessionStartBlock (r:1 w:0) - // Storage: ParaScheduler ParathreadQueue (r:1 w:1) - // Storage: ParaScheduler Scheduled (r:1 w:1) - // Storage: ParaScheduler ValidatorGroups (r:1 w:0) - // Storage: Ump NeedsDispatch (r:1 w:1) - // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) - // Storage: ParaInherent OnChainVotes (r:0 w:1) - // Storage: Hrmp HrmpWatermarks (r:0 w:1) - // Storage: Paras Heads (r:0 w:1) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: 
`ParaSessionInfo::Sessions` (r:1 w:0) + /// Proof: `ParaSessionInfo::Sessions` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:1) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::BackersOnDisputes` (r:1 w:1) + /// Proof: `ParasDisputes::BackersOnDisputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:1 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::PendingAvailability` (r:2 w:1) + /// Proof: `ParaInclusion::PendingAvailability` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::PendingAvailabilityCommitments` (r:1 w:1) + /// Proof: `ParaInclusion::PendingAvailabilityCommitments` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: 
`Measured`) + /// The range of component `v` is `[10, 200]`. fn enter_variable_disputes(v: u32, ) -> Weight { - Weight::from_parts(352_590_000 as u64, 0) - // Standard Error: 13_000 - .saturating_add(Weight::from_parts(49_254_000 as u64, 0).saturating_mul(v as u64)) - .saturating_add(T::DbWeight::get().reads(24 as u64)) - .saturating_add(T::DbWeight::get().writes(16 as u64)) + // Proof Size summary in bytes: + // Measured: `67819` + // Estimated: `73759 + v * (23 ±0)` + // Minimum execution time: 874_229_000 picoseconds. + Weight::from_parts(486_359_072, 0) + .saturating_add(Weight::from_parts(0, 73759)) + // Standard Error: 19_197 + .saturating_add(Weight::from_parts(41_842_161, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(26)) + .saturating_add(T::DbWeight::get().writes(16)) + .saturating_add(Weight::from_parts(0, 23).saturating_mul(v.into())) } - // Storage: ParaInherent Included (r:1 w:1) - // Storage: System ParentHash (r:1 w:0) - // Storage: ParaScheduler AvailabilityCores (r:1 w:1) - // Storage: ParasShared CurrentSessionIndex (r:1 w:0) - // Storage: Configuration ActiveConfig (r:1 w:0) - // Storage: ParasDisputes Frozen (r:1 w:0) - // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - // Storage: Paras Parachains (r:1 w:0) - // Storage: ParaInclusion PendingAvailability (r:2 w:1) - // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - // Storage: Dmp DownwardMessageQueues (r:1 w:1) - // Storage: Hrmp HrmpChannelDigests (r:1 w:1) - // Storage: Paras FutureCodeUpgrades (r:1 w:0) - // Storage: ParasDisputes Disputes (r:1 w:0) - // Storage: ParaScheduler SessionStartBlock (r:1 w:0) - // Storage: ParaScheduler ParathreadQueue (r:1 w:1) - // Storage: ParaScheduler Scheduled (r:1 w:1) - // Storage: ParaScheduler ValidatorGroups (r:1 w:0) - // Storage: Ump NeedsDispatch (r:1 w:1) - // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) - // Storage: ParaInclusion AvailabilityBitfields (r:0 w:1) - // Storage: ParaInherent OnChainVotes (r:0 w:1) - // Storage: ParasDisputes Included (r:0 w:1) - // Storage: Hrmp HrmpWatermarks (r:0 w:1) - // Storage: Paras Heads (r:0 w:1) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` 
(`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::PendingAvailability` (r:2 w:1) + /// Proof: `ParaInclusion::PendingAvailability` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::PendingAvailabilityCommitments` (r:1 w:1) + /// Proof: `ParaInclusion::PendingAvailabilityCommitments` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::AvailabilityBitfields` (r:0 w:1) + /// Proof: `ParaInclusion::AvailabilityBitfields` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) fn enter_bitfields() -> Weight { - Weight::from_parts(299_878_000 as u64, 0) - .saturating_add(T::DbWeight::get().reads(21 as u64)) - .saturating_add(T::DbWeight::get().writes(15 as u64)) + // Proof Size summary in bytes: + // Measured: `42791` + // Estimated: `48731` + // Minimum execution time: 428_757_000 picoseconds. 
+ Weight::from_parts(449_681_000, 0) + .saturating_add(Weight::from_parts(0, 48731)) + .saturating_add(T::DbWeight::get().reads(24)) + .saturating_add(T::DbWeight::get().writes(17)) } - // Storage: ParaInherent Included (r:1 w:1) - // Storage: System ParentHash (r:1 w:0) - // Storage: ParaScheduler AvailabilityCores (r:1 w:1) - // Storage: ParasShared CurrentSessionIndex (r:1 w:0) - // Storage: Configuration ActiveConfig (r:1 w:0) - // Storage: ParasDisputes Frozen (r:1 w:0) - // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - // Storage: Paras Parachains (r:1 w:0) - // Storage: ParaInclusion PendingAvailability (r:2 w:1) - // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - // Storage: Dmp DownwardMessageQueues (r:1 w:1) - // Storage: Hrmp HrmpChannelDigests (r:1 w:1) - // Storage: Paras FutureCodeUpgrades (r:1 w:0) - // Storage: ParasDisputes Disputes (r:2 w:0) - // Storage: ParaScheduler SessionStartBlock (r:1 w:0) - // Storage: ParaScheduler ParathreadQueue (r:1 w:1) - // Storage: ParaScheduler Scheduled (r:1 w:1) - // Storage: ParaScheduler ValidatorGroups (r:1 w:0) - // Storage: Paras PastCodeMeta (r:1 w:0) - // Storage: Paras CurrentCodeHash (r:1 w:0) - // Storage: Ump RelayDispatchQueueSize (r:1 w:0) - // Storage: Ump NeedsDispatch (r:1 w:1) - // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) - // Storage: ParaInherent OnChainVotes (r:0 w:1) - // Storage: ParasDisputes Included (r:0 w:1) - // Storage: Hrmp HrmpWatermarks (r:0 w:1) - // Storage: Paras Heads (r:0 w:1) - fn enter_backed_candidates_variable(_v: u32) -> Weight { - Weight::from_parts(442_472_000 as u64, 0) - .saturating_add(T::DbWeight::get().reads(25 as u64)) - .saturating_add(T::DbWeight::get().writes(14 as u64)) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::PendingAvailability` (r:2 w:1) + /// Proof: `ParaInclusion::PendingAvailability` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::PendingAvailabilityCommitments` (r:1 w:1) + /// Proof: `ParaInclusion::PendingAvailabilityCommitments` (`max_values`: None, `max_size`: None, mode: 
`Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `v` is `[101, 200]`. + fn enter_backed_candidates_variable(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `42863` + // Estimated: `48803` + // Minimum execution time: 1_276_079_000 picoseconds. 
+ Weight::from_parts(1_313_585_212, 0) + .saturating_add(Weight::from_parts(0, 48803)) + // Standard Error: 18_279 + .saturating_add(Weight::from_parts(43_528, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(27)) + .saturating_add(T::DbWeight::get().writes(16)) } - // Storage: ParaInherent Included (r:1 w:1) - // Storage: System ParentHash (r:1 w:0) - // Storage: ParaScheduler AvailabilityCores (r:1 w:1) - // Storage: ParasShared CurrentSessionIndex (r:1 w:0) - // Storage: Configuration ActiveConfig (r:1 w:0) - // Storage: ParasDisputes Frozen (r:1 w:0) - // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - // Storage: Paras Parachains (r:1 w:0) - // Storage: ParaInclusion PendingAvailability (r:2 w:1) - // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - // Storage: Dmp DownwardMessageQueues (r:1 w:1) - // Storage: Hrmp HrmpChannelDigests (r:1 w:1) - // Storage: Paras FutureCodeUpgrades (r:1 w:0) - // Storage: ParasDisputes Disputes (r:2 w:0) - // Storage: ParaScheduler SessionStartBlock (r:1 w:0) - // Storage: ParaScheduler ParathreadQueue (r:1 w:1) - // Storage: ParaScheduler Scheduled (r:1 w:1) - // Storage: ParaScheduler ValidatorGroups (r:1 w:0) - // Storage: Paras PastCodeMeta (r:1 w:0) - // Storage: Paras CurrentCodeHash (r:1 w:0) - // Storage: Ump RelayDispatchQueueSize (r:1 w:0) - // Storage: Ump NeedsDispatch (r:1 w:1) - // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) - // Storage: ParaInherent OnChainVotes (r:0 w:1) - // Storage: ParasDisputes Included (r:0 w:1) - // Storage: Hrmp HrmpWatermarks (r:0 w:1) - // Storage: Paras Heads (r:0 w:1) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::PendingAvailability` (r:2 w:1) + /// Proof: `ParaInclusion::PendingAvailability` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::PendingAvailabilityCommitments` (r:1 w:1) + /// Proof: `ParaInclusion::PendingAvailabilityCommitments` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` 
(`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeHash` (r:1 w:0) + /// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeRestrictionSignal` (r:1 w:0) + /// Proof: `Paras::UpgradeRestrictionSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) fn enter_backed_candidate_code_upgrade() -> Weight { - Weight::from_parts(36_903_411_000 as u64, 0) - .saturating_add(T::DbWeight::get().reads(25 as u64)) - .saturating_add(T::DbWeight::get().writes(14 as u64)) + // Proof Size summary in bytes: + // Measured: `42876` + // 
Estimated: `48816` + // Minimum execution time: 34_352_245_000 picoseconds. + Weight::from_parts(34_587_559_000, 0) + .saturating_add(Weight::from_parts(0, 48816)) + .saturating_add(T::DbWeight::get().reads(29)) + .saturating_add(T::DbWeight::get().writes(16)) } } diff --git a/polkadot/runtime/test-runtime/Cargo.toml b/polkadot/runtime/test-runtime/Cargo.toml index 9753a4093045b52c8c709cfcea4a19a15dc510d6..bdf688646052133124f58f2ece16672c9e1c64fa 100644 --- a/polkadot/runtime/test-runtime/Cargo.toml +++ b/polkadot/runtime/test-runtime/Cargo.toml @@ -155,6 +155,7 @@ runtime-benchmarks = [ "pallet-staking/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index b74def5de8aadd718df48b27aff1a7ca324389c3..a0617b3108f8135bede66a6e082c41095a2e9704 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -238,6 +238,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = frame_support::weights::ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = (); } parameter_types! { @@ -393,7 +394,7 @@ where let current_block = System::block_number().saturated_into::().saturating_sub(1); let tip = 0; - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -405,16 +406,17 @@ where frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), - ); - let raw_payload = SignedPayload::new(call, extra) + ) + .into(); + let raw_payload = SignedPayload::new(call, tx_ext) .map_err(|e| { log::warn!("Unable to create signed payload: {:?}", e); }) .ok()?; let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?; - let (call, extra, _) = raw_payload.deconstruct(); + let (call, tx_ext, _) = raw_payload.deconstruct(); let address = Indices::unlookup(account); - Some((call, (address, signature, extra))) + Some((call, (address, signature, tx_ext))) } } @@ -442,12 +444,32 @@ parameter_types! { pub Prefix: &'static [u8] = b"Pay KSMs to the Kusama account:"; } +#[cfg(feature = "runtime-benchmarks")] +pub struct ClaimsHelper; + +#[cfg(feature = "runtime-benchmarks")] +use frame_support::dispatch::DispatchInfo; + +#[cfg(feature = "runtime-benchmarks")] +impl claims::BenchmarkHelperTrait for ClaimsHelper { + fn default_call_and_info() -> (RuntimeCall, DispatchInfo) { + use frame_support::dispatch::GetDispatchInfo; + let call = RuntimeCall::Claims(claims::Call::attest { + statement: claims::StatementKind::Regular.to_text().to_vec(), + }); + let info = call.get_dispatch_info(); + (call, info) + } +} + impl claims::Config for Runtime { type RuntimeEvent = RuntimeEvent; type VestingSchedule = Vesting; type Prefix = Prefix; type MoveClaimOrigin = frame_system::EnsureRoot; type WeightInfo = claims::TestWeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = ClaimsHelper; } parameter_types! { @@ -728,8 +750,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// `BlockId` type as expected by this runtime. 
pub type BlockId = generic::BlockId; -/// The `SignedExtension` to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -741,7 +763,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< @@ -752,7 +774,7 @@ pub type Executive = frame_executive::Executive< AllPalletsWithSystem, >; /// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; +pub type SignedPayload = generic::SignedPayload; pub type Hash = ::Hash; pub type Extrinsic = ::Extrinsic; @@ -767,7 +789,7 @@ sp_api::impl_runtime_apis! { Executive::execute_block(block); } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index 4180828bcfb14d497d9da06bc74da91fa53f8d5f..6899edeeaeb8041f9eeb2cd3391efe89f3e15ed4 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -270,6 +270,7 @@ runtime-benchmarks = [ "pallet-state-trie-migration/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index df0abef618ec361a95c274a419ac1ffb42ffdc99..2ff08450b395cffc7ea28ae666419472f891109f 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -150,7 +150,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westend"), impl_name: create_runtime_str!("parity-westend"), authoring_version: 2, - spec_version: 1_007_001, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 24, @@ -200,6 +200,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = SS58Prefix; type MaxConsumers = frame_support::traits::ConstU32<16>; } @@ -385,6 +386,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -820,7 +822,7 @@ where // so the actual block number is `n`. 
.saturating_sub(1); let tip = 0; - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -832,16 +834,17 @@ where frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), - ); - let raw_payload = SignedPayload::new(call, extra) + ) + .into(); + let raw_payload = SignedPayload::new(call, tx_ext) .map_err(|e| { log::warn!("Unable to create signed payload: {:?}", e); }) .ok()?; let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?; - let (call, extra, _) = raw_payload.deconstruct(); + let (call, tx_ext, _) = raw_payload.deconstruct(); let address = ::Lookup::unlookup(account); - Some((call, (address, signature, extra))) + Some((call, (address, signature, tx_ext))) } } @@ -1548,8 +1551,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// `BlockId` type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The `SignedExtension` to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -1699,6 +1702,7 @@ pub mod migrations { // Migrate Identity pallet for Usernames pallet_identity::migration::versioned::V0ToV1, parachains_configuration::migration::v11::MigrateToV11, + parachains_configuration::migration::v12::MigrateToV12, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, // Migrate from legacy lease to coretime. Needs to run after configuration v11 @@ -1712,7 +1716,7 @@ pub mod migrations { /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, @@ -1723,7 +1727,7 @@ pub type Executive = frame_executive::Executive< Migrations, >; /// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; +pub type SignedPayload = generic::SignedPayload; #[cfg(feature = "runtime-benchmarks")] mod benches { @@ -1769,7 +1773,9 @@ mod benches { [pallet_staking, Staking] [pallet_sudo, Sudo] [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_treasury, Treasury] [pallet_utility, Utility] [pallet_vesting, Vesting] @@ -1793,7 +1799,7 @@ sp_api::impl_runtime_apis! { Executive::execute_block(block); } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -2311,6 +2317,7 @@ sp_api::impl_runtime_apis! { use pallet_election_provider_support_benchmarking::Pallet as ElectionProviderBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; @@ -2339,6 +2346,7 @@ sp_api::impl_runtime_apis! 
{ use pallet_election_provider_support_benchmarking::Pallet as ElectionProviderBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; impl pallet_session_benchmarking::Config for Runtime {} @@ -2411,6 +2419,13 @@ sp_api::impl_runtime_apis! { dest ) } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::here()), + fun: Fungible(ExistentialDeposit::get()), + } + } } impl frame_system_benchmarking::Config for Runtime {} impl pallet_nomination_pools_benchmarking::Config for Runtime {} diff --git a/polkadot/runtime/westend/src/weights/frame_system_extensions.rs b/polkadot/runtime/westend/src/weights/frame_system_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..e4e1a799ec6e6213e2d7613c6a18a25948298a11 --- /dev/null +++ b/polkadot/runtime/westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,116 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-20, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot +// benchmark +// pallet +// --steps=2 +// --repeat=2 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --pallet=frame-system-extensions +// --chain=westend-dev +// --output=./polkadot/runtime/westend/src/weights/ +// --header=./polkadot/file_header.txt + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_356_000 picoseconds. 
+ Weight::from_parts(7_805_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_510_000 picoseconds. + Weight::from_parts(10_009_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 761_000 picoseconds. + Weight::from_parts(4_308_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_396_000 picoseconds. + Weight::from_parts(7_585_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 521_000 picoseconds. + Weight::from_parts(4_168_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 541_000 picoseconds. + Weight::from_parts(4_228_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1489` + // Minimum execution time: 3_807_000 picoseconds. 
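Each of the autogenerated weight functions in this file follows the same pattern: a benchmark-measured `Weight::from_parts(ref_time, proof_size)` base plus per-read/per-write database costs from the runtime's configured `DbWeight`. A minimal sketch of how the parts combine, using the stock `RocksDbWeight` constants from `frame_support` in place of a concrete runtime's `T::DbWeight` (the function name is illustrative and not part of this PR):

```rust
use frame_support::{
    traits::Get,
    weights::{constants::RocksDbWeight, Weight},
};

// Combines a measured base weight (ref_time plus proof size) with the cost of
// one storage read and one storage write, as the generated functions here do.
fn example_extension_weight() -> Weight {
    Weight::from_parts(8_025_000, 1_489)
        .saturating_add(RocksDbWeight::get().reads(1))
        .saturating_add(RocksDbWeight::get().writes(1))
}
```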
+ Weight::from_parts(8_025_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/polkadot/runtime/westend/src/weights/mod.rs b/polkadot/runtime/westend/src/weights/mod.rs index f6a9008d71876726dda2ad240ecdb4588a1f82a3..8aeb42165504998c06ea5e73d4e2edc452d05b45 100644 --- a/polkadot/runtime/westend/src/weights/mod.rs +++ b/polkadot/runtime/westend/src/weights/mod.rs @@ -17,6 +17,7 @@ pub mod frame_election_provider_support; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_asset_rate; pub mod pallet_bags_list; pub mod pallet_balances; @@ -37,6 +38,7 @@ pub mod pallet_session; pub mod pallet_staking; pub mod pallet_sudo; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_treasury; pub mod pallet_utility; pub mod pallet_vesting; diff --git a/polkadot/runtime/westend/src/weights/pallet_sudo.rs b/polkadot/runtime/westend/src/weights/pallet_sudo.rs index e9ab3ad37a4cca6483dc7fdeb98562dd6f1937e6..649c43e031dc4a194bed6564e8e01dc823890c8a 100644 --- a/polkadot/runtime/westend/src/weights/pallet_sudo.rs +++ b/polkadot/runtime/westend/src/weights/pallet_sudo.rs @@ -94,4 +94,15 @@ impl pallet_sudo::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn check_only_sudo_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `132` + // Estimated: `1517` + // Minimum execution time: 2_875_000 picoseconds. + Weight::from_parts(6_803_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + } } diff --git a/polkadot/runtime/westend/src/weights/pallet_transaction_payment.rs b/polkadot/runtime/westend/src/weights/pallet_transaction_payment.rs new file mode 100644 index 0000000000000000000000000000000000000000..001a09f103dafcba1955ef14e70f8bbd8113fcb8 --- /dev/null +++ b/polkadot/runtime/westend/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,65 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot +// benchmark +// pallet +// --steps=2 +// --repeat=2 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --pallet=pallet_transaction_payment +// --chain=westend-dev +// --output=./polkadot/runtime/westend/src/weights/ +// --header=./polkadot/file_header.txt + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `156` + // Estimated: `1641` + // Minimum execution time: 30_888_000 picoseconds. + Weight::from_parts(41_308_000, 0) + .saturating_add(Weight::from_parts(0, 1641)) + .saturating_add(T::DbWeight::get().reads(3)) + } +} diff --git a/polkadot/runtime/westend/src/weights/pallet_xcm.rs b/polkadot/runtime/westend/src/weights/pallet_xcm.rs index 493acd0f9e7bdfbfd1e716b7e82a474643f1050b..10725cecf24995e34b3254baa23535cb91dd9981 100644 --- a/polkadot/runtime/westend/src/weights/pallet_xcm.rs +++ b/polkadot/runtime/westend/src/weights/pallet_xcm.rs @@ -16,26 +16,24 @@ //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot +// target/production/polkadot // benchmark // pallet -// --chain=westend-dev // --steps=50 // --repeat=20 -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --pallet=pallet_xcm // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=westend-dev // --header=./polkadot/file_header.txt // --output=./polkadot/runtime/westend/src/weights/ @@ -50,10 +48,6 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. 
pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { - fn transfer_assets() -> Weight { - // TODO: run benchmarks - Weight::zero() - } /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) @@ -64,29 +58,73 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 28_098_000 picoseconds. - Weight::from_parts(28_887_000, 0) - .saturating_add(Weight::from_parts(0, 3574)) + // Measured: `147` + // Estimated: `3612` + // Minimum execution time: 25_725_000 picoseconds. + Weight::from_parts(26_174_000, 0) + .saturating_add(Weight::from_parts(0, 3612)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 17_609_000 picoseconds. - Weight::from_parts(18_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `250` + // Estimated: `6196` + // Minimum execution time: 113_140_000 picoseconds. + Weight::from_parts(116_204_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 17_007_000 picoseconds. - Weight::from_parts(17_471_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `302` + // Estimated: `6196` + // Minimum execution time: 108_571_000 picoseconds. 
+ Weight::from_parts(110_650_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `250` + // Estimated: `6196` + // Minimum execution time: 111_836_000 picoseconds. + Weight::from_parts(114_435_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Benchmark::Override` (r:0 w:0) /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -104,8 +142,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_444_000 picoseconds. - Weight::from_parts(7_671_000, 0) + // Minimum execution time: 7_160_000 picoseconds. + Weight::from_parts(7_477_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -113,8 +151,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_126_000 picoseconds. - Weight::from_parts(2_253_000, 0) + // Minimum execution time: 1_934_000 picoseconds. + Weight::from_parts(2_053_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) @@ -133,11 +171,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 31_318_000 picoseconds. - Weight::from_parts(32_413_000, 0) - .saturating_add(Weight::from_parts(0, 3574)) + // Measured: `147` + // Estimated: `3612` + // Minimum execution time: 31_123_000 picoseconds. + Weight::from_parts(31_798_000, 0) + .saturating_add(Weight::from_parts(0, 3612)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) } @@ -155,11 +193,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `289` - // Estimated: `3754` - // Minimum execution time: 35_282_000 picoseconds. - Weight::from_parts(35_969_000, 0) - .saturating_add(Weight::from_parts(0, 3754)) + // Measured: `327` + // Estimated: `3792` + // Minimum execution time: 35_175_000 picoseconds. 
+ Weight::from_parts(36_098_000, 0) + .saturating_add(Weight::from_parts(0, 3792)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -169,45 +207,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_247_000 picoseconds. - Weight::from_parts(2_381_000, 0) + // Minimum execution time: 1_974_000 picoseconds. + Weight::from_parts(2_096_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `XcmPallet::SupportedVersion` (r:4 w:2) + /// Storage: `XcmPallet::SupportedVersion` (r:5 w:2) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `26` - // Estimated: `10916` - // Minimum execution time: 14_512_000 picoseconds. - Weight::from_parts(15_042_000, 0) - .saturating_add(Weight::from_parts(0, 10916)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `22` + // Estimated: `13387` + // Minimum execution time: 16_626_000 picoseconds. + Weight::from_parts(17_170_000, 0) + .saturating_add(Weight::from_parts(0, 13387)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifiers` (r:4 w:2) + /// Storage: `XcmPallet::VersionNotifiers` (r:5 w:2) /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `30` - // Estimated: `10920` - // Minimum execution time: 14_659_000 picoseconds. - Weight::from_parts(15_164_000, 0) - .saturating_add(Weight::from_parts(0, 10920)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `26` + // Estimated: `13391` + // Minimum execution time: 16_937_000 picoseconds. + Weight::from_parts(17_447_000, 0) + .saturating_add(Weight::from_parts(0, 13391)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:0) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:6 w:0) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `40` - // Estimated: `13405` - // Minimum execution time: 16_261_000 picoseconds. - Weight::from_parts(16_986_000, 0) - .saturating_add(Weight::from_parts(0, 13405)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15880` + // Minimum execution time: 19_157_000 picoseconds. + Weight::from_parts(19_659_000, 0) + .saturating_add(Weight::from_parts(0, 15880)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `XcmPallet::VersionNotifyTargets` (r:2 w:1) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -221,38 +259,38 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `6085` - // Minimum execution time: 30_539_000 picoseconds. - Weight::from_parts(31_117_000, 0) - .saturating_add(Weight::from_parts(0, 6085)) + // Measured: `183` + // Estimated: `6123` + // Minimum execution time: 30_699_000 picoseconds. 
+ Weight::from_parts(31_537_000, 0) + .saturating_add(Weight::from_parts(0, 6123)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:3 w:0) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:0) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `69` - // Estimated: `8484` - // Minimum execution time: 9_463_000 picoseconds. - Weight::from_parts(9_728_000, 0) - .saturating_add(Weight::from_parts(0, 8484)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `10959` + // Minimum execution time: 12_303_000 picoseconds. + Weight::from_parts(12_670_000, 0) + .saturating_add(Weight::from_parts(0, 10959)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:2) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:2) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `37` - // Estimated: `10927` - // Minimum execution time: 15_169_000 picoseconds. - Weight::from_parts(15_694_000, 0) - .saturating_add(Weight::from_parts(0, 10927)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `33` + // Estimated: `13398` + // Minimum execution time: 17_129_000 picoseconds. + Weight::from_parts(17_668_000, 0) + .saturating_add(Weight::from_parts(0, 13398)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:2) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:2) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -264,12 +302,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `149` - // Estimated: `11039` - // Minimum execution time: 37_549_000 picoseconds. - Weight::from_parts(38_203_000, 0) - .saturating_add(Weight::from_parts(0, 11039)) - .saturating_add(T::DbWeight::get().reads(8)) + // Measured: `183` + // Estimated: `13548` + // Minimum execution time: 39_960_000 picoseconds. + Weight::from_parts(41_068_000, 0) + .saturating_add(Weight::from_parts(0, 13548)) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) @@ -280,8 +318,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 2_947_000 picoseconds. - Weight::from_parts(3_117_000, 0) + // Minimum execution time: 2_333_000 picoseconds. + Weight::from_parts(2_504_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -292,10 +330,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7576` // Estimated: `11041` - // Minimum execution time: 24_595_000 picoseconds. 
- Weight::from_parts(24_907_000, 0) + // Minimum execution time: 22_932_000 picoseconds. + Weight::from_parts(23_307_000, 0) .saturating_add(Weight::from_parts(0, 11041)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `XcmPallet::AssetTraps` (r:1 w:1) + /// Proof: `XcmPallet::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `23` + // Estimated: `3488` + // Minimum execution time: 34_558_000 picoseconds. + Weight::from_parts(35_299_000, 0) + .saturating_add(Weight::from_parts(0, 3488)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs index 3a4813b667c68ea6400c9ad58cea8fd1bfece5d0..8fa3207c6446082a97877f488913d9c063114fca 100644 --- a/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `runtime_parachains::configuration` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -58,8 +58,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 8_065_000 picoseconds. - Weight::from_parts(8_389_000, 0) + // Minimum execution time: 7_775_000 picoseconds. + Weight::from_parts(8_036_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -74,8 +74,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 8_038_000 picoseconds. - Weight::from_parts(8_463_000, 0) + // Minimum execution time: 7_708_000 picoseconds. + Weight::from_parts(7_971_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -90,8 +90,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_843_000 picoseconds. - Weight::from_parts(8_216_000, 0) + // Minimum execution time: 7_746_000 picoseconds. + Weight::from_parts(8_028_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -116,8 +116,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_969_000 picoseconds. 
- Weight::from_parts(8_362_000, 0) + // Minimum execution time: 7_729_000 picoseconds. + Weight::from_parts(7_954_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -132,8 +132,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 10_084_000 picoseconds. - Weight::from_parts(10_451_000, 0) + // Minimum execution time: 9_871_000 picoseconds. + Weight::from_parts(10_075_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -148,8 +148,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_948_000 picoseconds. - Weight::from_parts(8_268_000, 0) + // Minimum execution time: 7_869_000 picoseconds. + Weight::from_parts(8_000_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -164,8 +164,24 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 10_257_000 picoseconds. - Weight::from_parts(10_584_000, 0) + // Minimum execution time: 9_797_000 picoseconds. + Weight::from_parts(10_373_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_config_with_scheduler_params() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_718_000 picoseconds. + Weight::from_parts(7_984_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index e3ea2fb8c06dfb0f3b5e03e4434993bd0d7c35ff..ed42f93692b406ea07b2edbda0fbdb315c2b4071 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -79,6 +79,13 @@ pub trait Config: crate::Config { fn set_up_complex_asset_transfer() -> Option<(Assets, u32, Location, Box)> { None } + + /// Gets an asset that can be handled by the AssetTransactor. + /// + /// Used only in benchmarks. + /// + /// Used, for example, in the benchmark for `claim_assets`. + fn get_asset() -> Asset; } benchmarks! { @@ -341,11 +348,23 @@ benchmarks! 
{ u32::MAX, ).unwrap()).collect::>(); crate::Pallet::::expect_response(query_id, Response::PalletsInfo(infos.try_into().unwrap())); - }: { as QueryHandler>::take_response(query_id); } + claim_assets { + let claim_origin = RawOrigin::Signed(whitelisted_caller()); + let claim_location = T::ExecuteXcmOrigin::try_origin(claim_origin.clone().into()).map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; + let asset: Asset = T::get_asset(); + // Trap assets for claiming later + crate::Pallet::::drop_assets( + &claim_location, + asset.clone().into(), + &XcmContext { origin: None, message_id: [0u8; 32], topic: None } + ); + let versioned_assets = VersionedAssets::V4(asset.into()); + }: _>(claim_origin.into(), Box::new(versioned_assets), Box::new(VersionedLocation::V4(claim_location))) + impl_benchmark_test_suite!( Pallet, crate::mock::new_test_ext_with_balances(Vec::new()), diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 5e1a3e55f9b62b67bb3cedb6a279a1c4c9046405..1a1f8b402a39041ffb5852dd56e9725a550afc7c 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -85,6 +85,7 @@ pub trait WeightInfo { fn migrate_and_notify_old_targets() -> Weight; fn new_query() -> Weight; fn take_response() -> Weight; + fn claim_assets() -> Weight; } /// fallback implementation @@ -165,6 +166,10 @@ impl WeightInfo for TestWeightInfo { fn take_response() -> Weight { Weight::from_parts(100_000_000, 0) } + + fn claim_assets() -> Weight { + Weight::from_parts(100_000_000, 0) + } } #[frame_support::pallet] @@ -1386,6 +1391,64 @@ pub mod pallet { weight_limit, ) } + + /// Claims assets trapped on this pallet because of leftover assets during XCM execution. + /// + /// - `origin`: Anyone can call this extrinsic. + /// - `assets`: The exact assets that were trapped. Use the version to specify what version + /// was the latest when they were trapped. + /// - `beneficiary`: The location/account where the claimed assets will be deposited. + #[pallet::call_index(12)] + #[pallet::weight({ + let assets_version = assets.identify_version(); + let maybe_assets: Result = (*assets.clone()).try_into(); + let maybe_beneficiary: Result = (*beneficiary.clone()).try_into(); + match (maybe_assets, maybe_beneficiary) { + (Ok(assets), Ok(beneficiary)) => { + let ticket: Location = GeneralIndex(assets_version as u128).into(); + let mut message = Xcm(vec![ + ClaimAsset { assets: assets.clone(), ticket }, + DepositAsset { assets: AllCounted(assets.len() as u32).into(), beneficiary }, + ]); + T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::claim_assets().saturating_add(w)) + } + _ => Weight::MAX + } + })] + pub fn claim_assets( + origin: OriginFor, + assets: Box, + beneficiary: Box, + ) -> DispatchResult { + let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; + log::debug!(target: "xcm::pallet_xcm::claim_assets", "origin: {:?}, assets: {:?}, beneficiary: {:?}", origin_location, assets, beneficiary); + // Extract version from `assets`. 
+ let assets_version = assets.identify_version(); + let assets: Assets = (*assets).try_into().map_err(|()| Error::::BadVersion)?; + let number_of_assets = assets.len() as u32; + let beneficiary: Location = + (*beneficiary).try_into().map_err(|()| Error::::BadVersion)?; + let ticket: Location = GeneralIndex(assets_version as u128).into(); + let mut message = Xcm(vec![ + ClaimAsset { assets, ticket }, + DepositAsset { assets: AllCounted(number_of_assets).into(), beneficiary }, + ]); + let weight = + T::Weigher::weight(&mut message).map_err(|()| Error::::UnweighableMessage)?; + let mut hash = message.using_encoded(sp_io::hashing::blake2_256); + let outcome = T::XcmExecutor::prepare_and_execute( + origin_location, + message, + &mut hash, + weight, + weight, + ); + outcome.ensure_complete().map_err(|error| { + log::error!(target: "xcm::pallet_xcm::claim_assets", "XCM execution failed with error: {:?}", error); + Error::::LocalExecutionIncomplete + })?; + Ok(()) + } } } diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index 17c181387dde480df9ae76798aa6649d1637dcf5..d5ba7312a3a55dcddbc24c64e613985fca0c7bb9 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -172,7 +172,7 @@ impl SendXcm for TestSendXcm { msg: &mut Option>, ) -> SendResult<(Location, Xcm<()>)> { if FAIL_SEND_XCM.with(|q| *q.borrow()) { - return Err(SendError::Transport("Intentional send failure used in tests")) + return Err(SendError::Transport("Intentional send failure used in tests")); } let pair = (dest.take().unwrap(), msg.take().unwrap()); Ok((pair, Assets::new())) @@ -654,6 +654,10 @@ impl super::benchmarking::Config for Test { }); Some((assets, fee_index as u32, dest, verify)) } + + fn get_asset() -> Asset { + Asset { id: AssetId(Location::here()), fun: Fungible(ExistentialDeposit::get()) } + } } pub(crate) fn last_event() -> RuntimeEvent { diff --git a/polkadot/xcm/pallet-xcm/src/tests/mod.rs b/polkadot/xcm/pallet-xcm/src/tests/mod.rs index 28c7d197443b070423bfb694593c6feb0d471534..13022d9a8b1f69f528393ee8ac16e3d62151bae9 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/mod.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/mod.rs @@ -467,6 +467,57 @@ fn trapped_assets_can_be_claimed() { }); } +// Like `trapped_assets_can_be_claimed` but using the `claim_assets` extrinsic. +#[test] +fn claim_assets_works() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + new_test_ext_with_balances(balances).execute_with(|| { + // First trap some assets. + let trapping_program = + Xcm::builder_unsafe().withdraw_asset((Here, SEND_AMOUNT).into()).build(); + // Even though assets are trapped, the extrinsic returns success. + assert_ok!(XcmPallet::execute( + RuntimeOrigin::signed(ALICE), + Box::new(VersionedXcm::V4(trapping_program)), + BaseXcmWeight::get() * 2, + )); + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); + + // Expected `AssetsTrapped` event info. + let source: Location = Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + let versioned_assets = VersionedAssets::V4(Assets::from((Here, SEND_AMOUNT))); + let hash = BlakeTwo256::hash_of(&(source.clone(), versioned_assets.clone())); + + // Assets were indeed trapped. 
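The new `claim_assets` extrinsic added above reduces to executing a small XCM program locally: it reads the version the assets were trapped under via the new `IdentifyVersion` trait, claims them with a `GeneralIndex` ticket encoding that version, and deposits everything to the beneficiary. A condensed sketch of that message-building step, assuming the `staging-xcm` crate is in scope as `xcm`; the helper name is illustrative, and weighing, execution and error handling are omitted (see the full implementation above):

```rust
use xcm::prelude::*;

// Builds the two-instruction program that `claim_assets` hands to the local
// XCM executor. `claim_program` is an illustrative name, not pallet API.
fn claim_program(assets: VersionedAssets, beneficiary: Location) -> Result<Xcm<()>, ()> {
    // The ticket encodes the XCM version the assets were trapped under.
    let assets_version = assets.identify_version();
    let assets: Assets = assets.try_into()?;
    let number_of_assets = assets.len() as u32;
    let ticket: Location = GeneralIndex(assets_version as u128).into();
    Ok(Xcm(vec![
        ClaimAsset { assets, ticket },
        DepositAsset { assets: AllCounted(number_of_assets).into(), beneficiary },
    ]))
}
```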
+ assert_eq!( + last_events(2), + vec![ + RuntimeEvent::XcmPallet(crate::Event::AssetsTrapped { + hash, + origin: source, + assets: versioned_assets + }), + RuntimeEvent::XcmPallet(crate::Event::Attempted { + outcome: Outcome::Complete { used: BaseXcmWeight::get() * 1 } + }) + ], + ); + let trapped = AssetTraps::::iter().collect::>(); + assert_eq!(trapped, vec![(hash, 1)]); + + // Now claim them with the extrinsic. + assert_ok!(XcmPallet::claim_assets( + RuntimeOrigin::signed(ALICE), + Box::new(VersionedAssets::V4((Here, SEND_AMOUNT).into())), + Box::new(VersionedLocation::V4( + AccountId32 { network: None, id: ALICE.clone().into() }.into() + )), + )); + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + assert_eq!(AssetTraps::::iter().collect::>(), vec![]); + }); +} + /// Test failure to complete execution reverts intermediate side-effects. /// /// XCM program will withdraw and deposit some assets, then fail execution of a further withdraw. diff --git a/polkadot/xcm/src/lib.rs b/polkadot/xcm/src/lib.rs index c90ad25c0d83d0bf155a766cf341d5b646e76800..ba8d726aecff4989417373fc9e14c174d5a5277f 100644 --- a/polkadot/xcm/src/lib.rs +++ b/polkadot/xcm/src/lib.rs @@ -165,6 +165,15 @@ macro_rules! versioned_type { <$v3>::max_encoded_len() } } + impl IdentifyVersion for $n { + fn identify_version(&self) -> Version { + use $n::*; + match self { + V3(_) => v3::VERSION, + V4(_) => v4::VERSION, + } + } + } }; ($(#[$attr:meta])* pub enum $n:ident { @@ -287,6 +296,16 @@ macro_rules! versioned_type { <$v3>::max_encoded_len() } } + impl IdentifyVersion for $n { + fn identify_version(&self) -> Version { + use $n::*; + match self { + V2(_) => v2::VERSION, + V3(_) => v3::VERSION, + V4(_) => v4::VERSION, + } + } + } }; } @@ -493,6 +512,12 @@ pub trait WrapVersion { ) -> Result, ()>; } +/// Used to get the version out of a versioned type. +// TODO(XCMv5): This could be `GetVersion` and we change the current one to `GetVersionFor`. +pub trait IdentifyVersion { + fn identify_version(&self) -> Version; +} + /// Check and return the `Version` that should be used for the `Xcm` datum for the destination /// `Location`, which will interpret it. 
pub trait GetVersion { @@ -572,9 +597,9 @@ pub type AlwaysLts = AlwaysV4; pub mod prelude { pub use super::{ latest::prelude::*, AlwaysLatest, AlwaysLts, AlwaysV2, AlwaysV3, AlwaysV4, GetVersion, - IntoVersion, Unsupported, Version as XcmVersion, VersionedAsset, VersionedAssetId, - VersionedAssets, VersionedInteriorLocation, VersionedLocation, VersionedResponse, - VersionedXcm, WrapVersion, + IdentifyVersion, IntoVersion, Unsupported, Version as XcmVersion, VersionedAsset, + VersionedAssetId, VersionedAssets, VersionedInteriorLocation, VersionedLocation, + VersionedResponse, VersionedXcm, WrapVersion, }; } diff --git a/polkadot/xcm/xcm-builder/Cargo.toml b/polkadot/xcm/xcm-builder/Cargo.toml index 10726b0f511909c5cc4509746c09b731d32b2faf..660a30605e53afcabc76d43335d131297a1fa23c 100644 --- a/polkadot/xcm/xcm-builder/Cargo.toml +++ b/polkadot/xcm/xcm-builder/Cargo.toml @@ -47,6 +47,7 @@ runtime-benchmarks = [ "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-salary/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-runtime-parachains/runtime-benchmarks", diff --git a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs index 9892c500f2eebb7cd4a3cfd88f86c6669cca4111..3bd2fa4b4f5309eac6bd2ac54ece79851c134613 100644 --- a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs @@ -25,14 +25,22 @@ use frame_support::{ traits::{ConstU32, Everything}, }; use frame_system::{EnsureRoot, EnsureSigned}; -use polkadot_test_runtime::SignedExtra; use primitives::{AccountIndex, BlakeTwo256, Signature}; use sp_runtime::{generic, traits::MaybeEquivalence, AccountId32, BuildStorage}; use xcm_executor::{traits::ConvertLocation, XcmExecutor}; +pub type TxExtension = ( + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckMortality, + frame_system::CheckNonce, + frame_system::CheckWeight, +); pub type Address = sp_runtime::MultiAddress; pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; pub type Header = generic::Header; pub type Block = generic::Block; diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs index a20390b64f946db2633ae2538b4a869b53fa338a..f953e54111c2e4cae5058b6a201b1a15e4ceb5f4 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs @@ -46,13 +46,13 @@ use xcm_builder::{ }; use xcm_executor::{Config, XcmExecutor}; -pub type SignedExtra = (frame_system::CheckNonZeroSender,); +pub type TxExtension = (frame_system::CheckNonZeroSender,); pub type BlockNumber = u64; pub type Address = MultiAddress; pub type Header = generic::Header; pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; pub type Block = generic::Block; pub type Signature = MultiSignature; diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs index 5bf65fa9f9ac407ead13a008919aa2796867ccdd..a11e535492cb696c47981de5b5d3a81e297b8762 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs @@ -45,13 +45,13 @@ use xcm_builder::{ }; use xcm_executor::{Config, XcmExecutor}; -pub 
type SignedExtra = (frame_system::CheckNonZeroSender,); +pub type TxExtension = (frame_system::CheckNonZeroSender,); pub type BlockNumber = u64; pub type Address = MultiAddress; pub type Header = generic::Header; pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; pub type Block = generic::Block; pub type Signature = MultiSignature; diff --git a/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml b/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml index f6bdfeb4877e1dbd5f293f8c0ec864f4f7f1b80d..2561661de1f8408b2ed2808f1f5e41b4286ce97e 100644 --- a/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml +++ b/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml @@ -2,9 +2,11 @@ timeout = 1000 [relaychain.genesis.runtimeGenesis.patch.configuration.config] - max_validators_per_core = 5 needed_approvals = 8 +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 5 + [relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params] max_approval_coalesce_count = 5 diff --git a/polkadot/zombienet_tests/functional/0004-parachains-garbage-candidate.toml b/polkadot/zombienet_tests/functional/0004-parachains-garbage-candidate.toml index 5d6f299d46133e52c645bd128fbdcc8171490d4e..a2a2621f8426d2e8deb32f524d23e666ab1b5c00 100644 --- a/polkadot/zombienet_tests/functional/0004-parachains-garbage-candidate.toml +++ b/polkadot/zombienet_tests/functional/0004-parachains-garbage-candidate.toml @@ -2,8 +2,10 @@ timeout = 1000 bootnode = true -[relaychain.genesis.runtimeGenesis.patch.configuration.config] +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] max_validators_per_core = 1 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config] needed_approvals = 2 [relaychain] diff --git a/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.toml b/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.toml index e2fbec079b1a81ec50054a30ef89d049bb717482..a3bbc82e74ba6d9d5c442742afc2dd18f31c7305 100644 --- a/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.toml +++ b/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.toml @@ -3,8 +3,10 @@ timeout = 1000 bootnode = true [relaychain.genesis.runtimeGenesis.patch.configuration.config] - max_validators_per_core = 1 needed_approvals = 2 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 1 group_rotation_frequency = 2 [relaychain] diff --git a/polkadot/zombienet_tests/functional/0006-parachains-max-tranche0.toml b/polkadot/zombienet_tests/functional/0006-parachains-max-tranche0.toml index bef54cb8ca416fb3210849fb9801c44a31e846a4..858f87b9cfe52c10c9cff9885dbeb57b0dcba2f5 100644 --- a/polkadot/zombienet_tests/functional/0006-parachains-max-tranche0.toml +++ b/polkadot/zombienet_tests/functional/0006-parachains-max-tranche0.toml @@ -3,10 +3,12 @@ timeout = 1000 bootnode = true [relaychain.genesis.runtimeGenesis.patch.configuration.config] - max_validators_per_core = 1 needed_approvals = 7 relay_vrf_modulo_samples = 5 +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 1 + [relaychain] default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" chain = "rococo-local" diff --git a/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.toml 
b/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.toml index 69eb0804d8cb70c58fbc70abdb091af3a197d2fb..573ccf961385fa2c721caa0bb67ed97b94deb5a1 100644 --- a/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.toml +++ b/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.toml @@ -2,9 +2,11 @@ timeout = 1000 [relaychain.genesis.runtimeGenesis.patch.configuration.config] - max_validators_per_core = 1 needed_approvals = 1 +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 1 + [relaychain] default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" chain = "rococo-local" diff --git a/polkadot/zombienet_tests/functional/0008-dispute-old-finalized.toml b/polkadot/zombienet_tests/functional/0008-dispute-old-finalized.toml index 1ea385c3a42ee8fdf89b7595151a98c26d9b011b..ea1c93a1403fb837b45a6da01d0920c516e43ae6 100644 --- a/polkadot/zombienet_tests/functional/0008-dispute-old-finalized.toml +++ b/polkadot/zombienet_tests/functional/0008-dispute-old-finalized.toml @@ -2,9 +2,11 @@ timeout = 1000 [relaychain.genesis.runtimeGenesis.patch.configuration.config] - max_validators_per_core = 1 needed_approvals = 1 +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 1 + [relaychain] default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" chain = "rococo-local" diff --git a/polkadot/zombienet_tests/functional/0010-validator-disabling.toml b/polkadot/zombienet_tests/functional/0010-validator-disabling.toml index 6701d60d74d147d517fc55dd3f1253171f6b807f..c9d79c5f8f236918cf409fd684b7d0b8d9792d33 100644 --- a/polkadot/zombienet_tests/functional/0010-validator-disabling.toml +++ b/polkadot/zombienet_tests/functional/0010-validator-disabling.toml @@ -3,8 +3,10 @@ timeout = 1000 bootnode = true [relaychain.genesis.runtimeGenesis.patch.configuration.config] - max_validators_per_core = 1 needed_approvals = 2 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 1 group_rotation_frequency = 10 [relaychain] diff --git a/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.toml b/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.toml index 5a6832b149be21a48ca17fe6e84f75cfc3324ed0..b776622fdce33df2e9a78debc31ee3e62ae4805d 100644 --- a/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.toml +++ b/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.toml @@ -8,13 +8,16 @@ chain = "rococo-local" [relaychain.genesis.runtimeGenesis.patch.configuration.config] needed_approvals = 4 relay_vrf_modulo_samples = 6 - scheduling_lookahead = 2 - group_rotation_frequency = 4 [relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params] max_candidate_depth = 3 allowed_ancestry_len = 2 +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + lookahead = 2 + group_rotation_frequency = 4 + + [relaychain.default_resources] limits = { memory = "4G", cpu = "2" } requests = { memory = "2G", cpu = "1" } diff --git a/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.toml b/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.toml index 0dfd814e10a5ecd4437f469b9869dff5c27047f3..9b3576eaa3c212bd9490095c12ae4bca65f6fa54 100644 --- a/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.toml +++ b/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.toml @@ -3,9 +3,11 @@ timeout = 1000 
bootnode = true [relaychain.genesis.runtimeGenesis.patch.configuration.config] - max_validators_per_core = 2 needed_approvals = 4 - coretime_cores = 2 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 2 + num_cores = 2 [relaychain] default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" diff --git a/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.zndsl b/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.zndsl index a7193c9282b961f00134c2336695d4f2737a5b91..edc87c802a0965b6fd685db44ccbf50e807f1ed3 100644 --- a/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.zndsl +++ b/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.zndsl @@ -1,4 +1,4 @@ -Description: Test that a paraid acquiring multiple cores does not brick itself if ElasticScalingMVP feature is enabled +Description: Test that a paraid acquiring multiple cores does not brick itself if ElasticScalingMVP feature is enabled in genesis Network: ./0012-elastic-scaling-mvp.toml Creds: config @@ -15,14 +15,5 @@ alice: js-script ./0012-register-para.js return is 0 within 600 seconds validator: reports substrate_block_height{status="finalized"} is at least 35 within 100 seconds -# Parachain will now be stalled -validator: parachain 2000 block height is lower than 20 within 300 seconds - -# Enable the ElasticScalingMVP node feature. -alice: js-script ./0012-enable-node-feature.js with "1" return is 0 within 600 seconds - -# Wait two sessions for the config to be updated. -sleep 120 seconds - # Ensure parachain is now making progress. validator: parachain 2000 block height is at least 30 within 200 seconds diff --git a/polkadot/zombienet_tests/functional/0012-enable-node-feature.js b/polkadot/zombienet_tests/functional/0012-enable-node-feature.js deleted file mode 100644 index 4822e1f664478a93579c58e82db0f694b7008ee8..0000000000000000000000000000000000000000 --- a/polkadot/zombienet_tests/functional/0012-enable-node-feature.js +++ /dev/null @@ -1,37 +0,0 @@ -async function run(nodeName, networkInfo, index) { - const { wsUri, userDefinedTypes } = networkInfo.nodesByName[nodeName]; - const api = await zombie.connect(wsUri, userDefinedTypes); - - await zombie.util.cryptoWaitReady(); - - // account to submit tx - const keyring = new zombie.Keyring({ type: "sr25519" }); - const alice = keyring.addFromUri("//Alice"); - - await new Promise(async (resolve, reject) => { - const unsub = await api.tx.sudo - .sudo(api.tx.configuration.setNodeFeature(Number(index), true)) - .signAndSend(alice, ({ status, isError }) => { - if (status.isInBlock) { - console.log( - `Transaction included at blockhash ${status.asInBlock}`, - ); - } else if (status.isFinalized) { - console.log( - `Transaction finalized at blockHash ${status.asFinalized}`, - ); - unsub(); - return resolve(); - } else if (isError) { - console.log(`Transaction error`); - reject(`Transaction error`); - } - }); - }); - - - - return 0; -} - -module.exports = { run }; diff --git a/polkadot/zombienet_tests/misc/0001-paritydb.toml b/polkadot/zombienet_tests/misc/0001-paritydb.toml index 399f848d3ac49e7e9950617c170c13d5c63593dd..b3ce2081b1119ec237c1b3d6e14e1e4aed8322c5 100644 --- a/polkadot/zombienet_tests/misc/0001-paritydb.toml +++ b/polkadot/zombienet_tests/misc/0001-paritydb.toml @@ -3,9 +3,11 @@ timeout = 1000 bootnode = true [relaychain.genesis.runtimeGenesis.patch.configuration.config] - max_validators_per_core = 1 needed_approvals = 3 
+[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 1 + [relaychain] default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" chain = "rococo-local" diff --git a/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl b/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl index db0a60ac1df617e5c89dc6a1385c4c106c1ead05..5fe1b2ad2f1a7589a28f8b1b8da05d6f35d3d59b 100644 --- a/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl +++ b/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl @@ -10,9 +10,8 @@ dave: parachain 2001 block height is at least 10 within 200 seconds # POLKADOT_PR_ARTIFACTS_URL=https://gitlab.parity.io/parity/mirrors/polkadot/-/jobs/1842869/artifacts/raw/artifacts # with the version of polkadot you want to download. -# avg 30s in our infra -alice: run ./0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_ARTIFACTS_URL}}" within 60 seconds -bob: run ./0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_ARTIFACTS_URL}}" within 60 seconds +alice: run ./0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_ARTIFACTS_URL}}" within 240 seconds +bob: run ./0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_ARTIFACTS_URL}}" within 240 seconds # update the cmd to add the flag '--insecure-validator-i-know-what-i-do' # once the base image include the version with this flag we can remove this logic. alice: run ./0002-update-cmd.sh within 60 seconds diff --git a/prdoc/1.4.0/pr_1246.prdoc b/prdoc/1.4.0/pr_1246.prdoc index a4d270c45cb5915760ddfbd60e5e4b3b7c08cd4a..3b5c2017f22acfba25471595c50015ae14c3e5cd 100644 --- a/prdoc/1.4.0/pr_1246.prdoc +++ b/prdoc/1.4.0/pr_1246.prdoc @@ -11,7 +11,7 @@ migrations: description: "Messages from the DMP dispatch queue will be moved over to the MQ pallet via `on_initialize`. This happens over multiple blocks and emits a `Completed` event at the end. The pallet can be un-deployed and deleted afterwards. Note that the migration reverses the order of messages, which should be acceptable as a one-off." crates: - - name: cumulus_pallet_xcmp_queue + - name: cumulus-pallet-xcmp-queue note: Pallet config must be altered according to the MR description. host_functions: [] diff --git a/prdoc/1.6.0/pr_2689.prdoc b/prdoc/1.6.0/pr_2689.prdoc index 847c3e8026cef9dfdf63f044a1b773be85b12921..5d3081e3a4ce71d872c8eb8a96fda5b2e273b684 100644 --- a/prdoc/1.6.0/pr_2689.prdoc +++ b/prdoc/1.6.0/pr_2689.prdoc @@ -1,7 +1,7 @@ # Schema: Parity PR Documentation Schema (prdoc) # See doc at https://github.com/paritytech/prdoc -title: BEEFY: Support compatibility with Warp Sync - Allow Warp Sync for Validators +title: "BEEFY: Support compatibility with Warp Sync - Allow Warp Sync for Validators" doc: - audience: Node Operator diff --git a/prdoc/1.6.0/pr_2771.prdoc b/prdoc/1.6.0/pr_2771.prdoc index 1b49162e4392ba1ad1a77d61e5b2289474b0ffbe..50fb99556ecddf39adbcece77d9b83e5d98651b4 100644 --- a/prdoc/1.6.0/pr_2771.prdoc +++ b/prdoc/1.6.0/pr_2771.prdoc @@ -6,4 +6,4 @@ doc: Enable better req-response protocol versioning, by allowing for fallback requests on different protocols. 
crates: - - name: sc_network + - name: sc-network diff --git a/prdoc/pr_1660.prdoc b/prdoc/1.8.0/pr_1660.prdoc similarity index 100% rename from prdoc/pr_1660.prdoc rename to prdoc/1.8.0/pr_1660.prdoc diff --git a/prdoc/pr_2061.prdoc b/prdoc/1.8.0/pr_2061.prdoc similarity index 100% rename from prdoc/pr_2061.prdoc rename to prdoc/1.8.0/pr_2061.prdoc diff --git a/prdoc/pr_2290.prdoc b/prdoc/1.8.0/pr_2290.prdoc similarity index 100% rename from prdoc/pr_2290.prdoc rename to prdoc/1.8.0/pr_2290.prdoc diff --git a/prdoc/pr_2903.prdoc b/prdoc/1.8.0/pr_2903.prdoc similarity index 100% rename from prdoc/pr_2903.prdoc rename to prdoc/1.8.0/pr_2903.prdoc diff --git a/prdoc/pr_2949-async-backing-on-all-testnet-system-chains.prdoc b/prdoc/1.8.0/pr_2949-async-backing-on-all-testnet-system-chains.prdoc similarity index 100% rename from prdoc/pr_2949-async-backing-on-all-testnet-system-chains.prdoc rename to prdoc/1.8.0/pr_2949-async-backing-on-all-testnet-system-chains.prdoc diff --git a/prdoc/pr_3007.prdoc b/prdoc/1.8.0/pr_3007.prdoc similarity index 100% rename from prdoc/pr_3007.prdoc rename to prdoc/1.8.0/pr_3007.prdoc diff --git a/prdoc/pr_3052.prdoc b/prdoc/1.8.0/pr_3052.prdoc similarity index 100% rename from prdoc/pr_3052.prdoc rename to prdoc/1.8.0/pr_3052.prdoc diff --git a/prdoc/pr_3060.prdoc b/prdoc/1.8.0/pr_3060.prdoc similarity index 100% rename from prdoc/pr_3060.prdoc rename to prdoc/1.8.0/pr_3060.prdoc diff --git a/prdoc/pr_3079.prdoc b/prdoc/1.8.0/pr_3079.prdoc similarity index 100% rename from prdoc/pr_3079.prdoc rename to prdoc/1.8.0/pr_3079.prdoc diff --git a/prdoc/pr_3154.prdoc b/prdoc/1.8.0/pr_3154.prdoc similarity index 100% rename from prdoc/pr_3154.prdoc rename to prdoc/1.8.0/pr_3154.prdoc diff --git a/prdoc/pr_3160.prdoc b/prdoc/1.8.0/pr_3160.prdoc similarity index 100% rename from prdoc/pr_3160.prdoc rename to prdoc/1.8.0/pr_3160.prdoc diff --git a/prdoc/pr_3166.prdoc b/prdoc/1.8.0/pr_3166.prdoc similarity index 100% rename from prdoc/pr_3166.prdoc rename to prdoc/1.8.0/pr_3166.prdoc diff --git a/prdoc/pr_3184.prdoc b/prdoc/1.8.0/pr_3184.prdoc similarity index 100% rename from prdoc/pr_3184.prdoc rename to prdoc/1.8.0/pr_3184.prdoc diff --git a/prdoc/pr_3212.prdoc b/prdoc/1.8.0/pr_3212.prdoc similarity index 100% rename from prdoc/pr_3212.prdoc rename to prdoc/1.8.0/pr_3212.prdoc diff --git a/prdoc/pr_3225.prdoc b/prdoc/1.8.0/pr_3225.prdoc similarity index 100% rename from prdoc/pr_3225.prdoc rename to prdoc/1.8.0/pr_3225.prdoc diff --git a/prdoc/pr_3230.prdoc b/prdoc/1.8.0/pr_3230.prdoc similarity index 100% rename from prdoc/pr_3230.prdoc rename to prdoc/1.8.0/pr_3230.prdoc diff --git a/prdoc/pr_3232.prdoc b/prdoc/1.8.0/pr_3232.prdoc similarity index 100% rename from prdoc/pr_3232.prdoc rename to prdoc/1.8.0/pr_3232.prdoc diff --git a/prdoc/pr_3243.prdoc b/prdoc/1.8.0/pr_3243.prdoc similarity index 100% rename from prdoc/pr_3243.prdoc rename to prdoc/1.8.0/pr_3243.prdoc diff --git a/prdoc/pr_3244.prdoc b/prdoc/1.8.0/pr_3244.prdoc similarity index 100% rename from prdoc/pr_3244.prdoc rename to prdoc/1.8.0/pr_3244.prdoc diff --git a/prdoc/pr_3272.prdoc b/prdoc/1.8.0/pr_3272.prdoc similarity index 100% rename from prdoc/pr_3272.prdoc rename to prdoc/1.8.0/pr_3272.prdoc diff --git a/prdoc/pr_3301.prdoc b/prdoc/1.8.0/pr_3301.prdoc similarity index 100% rename from prdoc/pr_3301.prdoc rename to prdoc/1.8.0/pr_3301.prdoc diff --git a/prdoc/pr_3308.prdoc b/prdoc/1.8.0/pr_3308.prdoc similarity index 100% rename from prdoc/pr_3308.prdoc rename to prdoc/1.8.0/pr_3308.prdoc diff --git 
a/prdoc/pr_3319.prdoc b/prdoc/1.8.0/pr_3319.prdoc similarity index 100% rename from prdoc/pr_3319.prdoc rename to prdoc/1.8.0/pr_3319.prdoc diff --git a/prdoc/pr_3325.prdoc b/prdoc/1.8.0/pr_3325.prdoc similarity index 100% rename from prdoc/pr_3325.prdoc rename to prdoc/1.8.0/pr_3325.prdoc diff --git a/prdoc/pr_3358.prdoc b/prdoc/1.8.0/pr_3358.prdoc similarity index 100% rename from prdoc/pr_3358.prdoc rename to prdoc/1.8.0/pr_3358.prdoc diff --git a/prdoc/pr_3361.prdoc b/prdoc/1.8.0/pr_3361.prdoc similarity index 100% rename from prdoc/pr_3361.prdoc rename to prdoc/1.8.0/pr_3361.prdoc diff --git a/prdoc/pr_3364.prdoc b/prdoc/1.8.0/pr_3364.prdoc similarity index 100% rename from prdoc/pr_3364.prdoc rename to prdoc/1.8.0/pr_3364.prdoc diff --git a/prdoc/pr_3370.prdoc b/prdoc/1.8.0/pr_3370.prdoc similarity index 100% rename from prdoc/pr_3370.prdoc rename to prdoc/1.8.0/pr_3370.prdoc diff --git a/prdoc/pr_3384.prdoc b/prdoc/1.8.0/pr_3384.prdoc similarity index 100% rename from prdoc/pr_3384.prdoc rename to prdoc/1.8.0/pr_3384.prdoc diff --git a/prdoc/pr_3395.prdoc b/prdoc/1.8.0/pr_3395.prdoc similarity index 100% rename from prdoc/pr_3395.prdoc rename to prdoc/1.8.0/pr_3395.prdoc diff --git a/prdoc/pr_3415.prdoc b/prdoc/1.8.0/pr_3415.prdoc similarity index 100% rename from prdoc/pr_3415.prdoc rename to prdoc/1.8.0/pr_3415.prdoc diff --git a/prdoc/pr_3435.prdoc b/prdoc/1.8.0/pr_3435.prdoc similarity index 100% rename from prdoc/pr_3435.prdoc rename to prdoc/1.8.0/pr_3435.prdoc diff --git a/prdoc/1.8.0/pr_3477.prdoc b/prdoc/1.8.0/pr_3477.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..26e96d3635b14634ca523797d741ddaf224aa9ae --- /dev/null +++ b/prdoc/1.8.0/pr_3477.prdoc @@ -0,0 +1,11 @@ +title: Allow parachain which acquires multiple coretime cores to make progress + +doc: + - audience: Node Operator + description: | + Adds the needed changes so that parachains which acquire multiple coretime cores can still make progress. + Only one of the cores will be able to be occupied at a time. + Only works if the ElasticScalingMVP node feature is enabled in the runtime and the block author validator is + updated to include this change. + +crates: [ ] diff --git a/prdoc/pr_1554.prdoc b/prdoc/pr_1554.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..bfce7c5edafd9df39ff9235f18536591b1d47158 --- /dev/null +++ b/prdoc/pr_1554.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Runtime Upgrade ref docs and Single Block Migration example pallet + +doc: + - audience: Runtime Dev + description: | + `frame_support::traits::GetStorageVersion::current_storage_version` has been renamed `frame_support::traits::GetStorageVersion::in_code_storage_version`. + A simple find-replace is sufficient to handle this change. 
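The rename described in pr_1554 above is mechanical; a minimal sketch of the call-site change, assuming a purely illustrative `pallet_example` that is not part of this PR:

```rust
// Sketch only: `pallet_example` is a placeholder pallet used to illustrate the
// rename from `current_storage_version` to `in_code_storage_version`.
use frame_support::traits::GetStorageVersion;

fn in_code_version<T: pallet_example::Config>() {
	// Before this PR:
	// let _ = pallet_example::Pallet::<T>::current_storage_version();
	// After this PR (a simple find-and-replace):
	let _ = pallet_example::Pallet::<T>::in_code_storage_version();
}
```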
+ +crates: + - name: "frame-support" + diff --git a/prdoc/pr_1781.prdoc b/prdoc/pr_1781.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..e3560842d15ab249539ac2de172dc5914e92cc19 --- /dev/null +++ b/prdoc/pr_1781.prdoc @@ -0,0 +1,45 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Multi-Block-Migrations, `poll` hook and new System Callbacks" + +doc: + - audience: Runtime Dev + description: | + The major things that this MR touches are: + + **Multi-Block-Migrations**: `pallet-migrations` is introduced, which can be configured in the + `System` of a runtime to act as a multi-block migrator. The `migrations` pallet then in turn + receives the list of MBMs as a config parameter. The list of migrations can be an aggregated + tuple of `SteppedMigration` trait implementations. + It is paramount that the `migrations` pallet is configured in `System` once it is deployed. A + test is in place to double check this. + + To integrate this into your runtime, it is only necessary to change the return type of + `initialize_block` to `RuntimeExecutiveMode`. For extended info please see + https://github.com/paritytech/polkadot-sdk/pull/1781. + + **poll**: a new pallet hook named `poll` is added. This can be used for places where the code + that should be executed is not deadline critical. Runtime devs are advised to skim their usage + of `on_initialize` and `on_finalize` to see whether they can be replaced with `poll`. `poll` is + not guaranteed to be called each block. In fact it will not be called when MBMs are ongoing. + + **System Callbacks**: The `system` pallet gets five new config items - all of which can be + safely set to `()` as default. They are: + - `SingleBlockMigrations`: replaces the `Executive` now for configuring migrations. + - `MultiBlockMigrator`: the `pallet-migrations` would be set here, if deployed. + - `PreInherents`: a hook that runs before any inherent. + - `PostInherents`: a hook to run between inherents and `poll`/MBM logic. + - `PostTransactions`: a hook to run after all transactions but before `on_idle`. + +crates: + - name: frame-executive + - name: frame-system + - name: frame-support + - name: frame-support-procedural + - name: pallet-migrations + - name: sc-basic-authorship + - name: sc-block-builder + - name: sp-api + - name: sp-api-proc-macro + - name: sp-runtime diff --git a/prdoc/pr_2280.prdoc b/prdoc/pr_2280.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..3026dc254e64b4e97f847f010afab2b42e9647c5 --- /dev/null +++ b/prdoc/pr_2280.prdoc @@ -0,0 +1,144 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: FRAME Create `TransactionExtension` as a replacement for `SignedExtension` + +doc: + - audience: Runtime User + description: | + Introduces a new trait `TransactionExtension` to replace `SignedExtension`. It also introduces the + idea of transactions which obey the runtime's extensions and have according Extension data + (né Extra data) yet do not have hard-coded signatures. + + Deprecates the terminology of "Unsigned" when used for transactions/extrinsics owing to there + now being "proper" unsigned transactions which obey the extension framework and "old-style" + unsigned which do not. Instead we have `General` for the former and `Bare` for the latter.
+ Unsigned will be phased out as a type of transaction, and `Bare` will only be used for + Inherents. + + Types of extrinsic are now therefore: + - Bare (no hardcoded signature, no Extra data; used to be known as "Unsigned") + - Bare transactions (deprecated) - Gossiped, validated with `ValidateUnsigned` + (deprecated) and the `_bare_compat` bits of `TransactionExtension` (deprecated). + - Inherents - Not gossiped, validated with `ProvideInherent`. + - Extended (Extra data) - Gossiped, validated via `TransactionExtension`. + - Signed transactions (with a hardcoded signature). + - General transactions (without a hardcoded signature). + + Notable information on `TransactionExtension` and the differences from `SignedExtension` + - `AdditionalSigned`/`additional_signed` is renamed to `Implicit`/`implicit`. It is encoded + for the entire transaction and passed in to each extension as a new argument to validate. + - `pre_dispatch` is renamed to `prepare`. + - `validate` runs transaction validation logic both off-chain and on-chain, and is + non-mutating. + - `prepare` runs on-chain pre-execution logic using information extracted during validation + and is mutating. + - `validate` and `prepare` are now passed an `Origin` rather than an `AccountId`. If the + extension logic presumes an `AccountId`, consider using the trait function + `AsSystemOriginSigner::as_system_origin_signer`. + - A signature on the underlying transaction may validly not be present. + - The origin may be altered during validation. + - Validation functionality present in `validate` should not be repeated in `prepare`. + Useful information obtained during `validate` should now be passed in to `prepare` using + the new user-specifiable type `Val`. + - Unsigned logic should be migrated from the old `*_unsigned` functions into the regular + versions of the new functions where the `Origin` is `None`. + - The `Call` type defining the runtime call is now a type parameter. + - `TransactionExtension` now takes a `Context` type parameter. This defines some arbitrary + contextual data that is injected into the transaction extension logic. It is unused in + instances migrated from `SignedExtension`. + - Extensions now track the weight they consume during validation, preparation and + post-dispatch through the `TransactionExtensionBase::weight` function. + - `TestXt` was removed and its usage in tests was replaced with `UncheckedExtrinsic` + instances. + + To fix the build issues introduced by this change, use the `AsTransactionExtension` adapter + to wrap existing `SignedExtension`s by converting them using the `From` + generic implementation for `AsTransactionExtension`. More details on migrating existing + `SignedExtension` implementations to `TransactionExtension` can be found in the PR description.
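A hedged sketch of the migration pattern this PR applies across the tree (the same change is visible in the `xcm-builder` mock and xcm-simulator fuzzer hunks above): the old `SignedExtra` tuple becomes a `TxExtension`, and the extrinsic alias is parameterised with it. `Runtime`, `Address`, `RuntimeCall` and `Signature` below are stand-ins for a chain's concrete types, not names introduced here.

```rust
// Illustrative only: the concrete runtime types are assumptions.
pub type TxExtension = (
	frame_system::CheckNonZeroSender<Runtime>,
	frame_system::CheckSpecVersion<Runtime>,
	frame_system::CheckTxVersion<Runtime>,
	frame_system::CheckGenesis<Runtime>,
	frame_system::CheckMortality<Runtime>,
	frame_system::CheckNonce<Runtime>,
	frame_system::CheckWeight<Runtime>,
);

// Formerly parameterised with the `SignedExtra` tuple.
pub type UncheckedExtrinsic =
	sp_runtime::generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, TxExtension>;
```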
+ +crates: + - name: bridge-runtime-common + - name: bp-bridge-hub-cumulus + - name: bp-kusama + - name: bp-polkadot-bulletin + - name: bp-polkadot + - name: bp-rococo + - name: bp-westend + - name: bp-polkadot-core + - name: bp-runtime + - name: snowbridge-pallet-inbound-queue + - name: snowbridge-pallet-outbound-queue + - name: snowbridge-pallet-system + - name: snowbridge-runtime-test-common + - name: parachain-template-runtime + - name: asset-hub-rococo-runtime + - name: asset-hub-westend-runtime + - name: bridge-hub-rococo-runtime + - name: bridge-hub-westend-runtime + - name: collectives-westend-runtime + - name: contracts-rococo-runtime + - name: coretime-rococo-runtime + - name: coretime-westend-runtime + - name: glutton-westend-runtime + - name: people-rococo-runtime + - name: people-westend-runtime + - name: seedling-runtime + - name: shell-runtime + - name: penpal-runtime + - name: rococo-parachain-runtime + - name: polkadot-parachain-bin + - name: cumulus-primitives-storage-weight-reclaim + - name: cumulus-test-client + - name: cumulus-test-runtime + - name: cumulus-test-service + - name: polkadot-sdk-docs + - name: polkadot-service + - name: polkadot-test-service + - name: polkadot-runtime-common + - name: rococo-runtime + - name: polkadot-test-runtime + - name: westend-runtime + - name: staging-xcm-builder + - name: minimal-runtime + - name: node-template + - name: node-template-runtime + - name: staging-node-cli + - name: kitchensink-runtime + - name: node-testing + - name: sc-client-api + - name: sc-client-db + - name: sc-network-gossip + - name: sc-network-sync + - name: sc-transaction-pool + - name: frame + - name: pallet-babe + - name: pallet-balances + - name: pallet-beefy + - name: pallet-collective + - name: pallet-election-provider-multi-phase + - name: pallet-elections-phragmen + - name: pallet-example-basic + - name: pallet-example-offchain-worker + - name: frame-executive + - name: pallet-grandpa + - name: pallet-im-online + - name: pallet-offences + - name: pallet-sassafras + - name: pallet-state-trie-migration + - name: pallet-sudo + - name: frame-support-procedural + - name: frame-support + - name: frame-system + - name: frame-system-benchmarking + - name: pallet-transaction-payment + - name: pallet-asset-conversion-tx-payment + - name: pallet-asset-tx-payment + - name: pallet-skip-feeless-payment + - name: sp-inherents + - name: sp-metadata-ir + - name: sp-runtime + - name: substrate-test-runtime + - name: frame-benchmarking-cli + - name: frame-remote-externalities + - name: substrate-rpc-client diff --git a/prdoc/pr_2393.prdoc b/prdoc/pr_2393.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..708d017fafd50d103c0470997c3a7571e0ff30c1 --- /dev/null +++ b/prdoc/pr_2393.prdoc @@ -0,0 +1,16 @@ +title: "[XCMP] Use the number of 'ready' pages in XCMP suspend logic" + +doc: + - audience: Runtime Dev + description: | + Semantics of the suspension logic in the XCMP queue pallet change from using the number of + total pages to the number of 'ready' pages. The number of ready pages is now also exposed by + the `MessageQueue` pallet to downstream via the queue `footprint`. 
+ +crates: + - name: cumulus-pallet-xcmp-queue + bump: patch + - name: pallet-message-queue + bump: patch + - name: frame-support + bump: major diff --git a/prdoc/pr_3187.prdoc b/prdoc/pr_3187.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..bda41142d86c8f9c1af8398e0cf21cdfeebebfa7 --- /dev/null +++ b/prdoc/pr_3187.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Retrying an execution on failed runtime construction + +doc: + - audience: Node Dev + description: | + If a runtime construction error happened during the execution request, then the artifact is re-prepared + and the execution request is retried at most once. See also the related issue. + +crates: + - name: polkadot-node-core-candidate-validation + - name: polkadot-node-core-pvf + - name: polkadot-node-core-pvf-execute-worker + - name: polkadot-node-core-pvf-common diff --git a/prdoc/pr_3233.prdoc b/prdoc/pr_3233.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..ed4e8cce31f75b4bd98f91f7d15ec3fae0761354 --- /dev/null +++ b/prdoc/pr_3233.prdoc @@ -0,0 +1,12 @@ +title: "provisioner: allow multiple cores assigned to the same para" + +topic: Node + +doc: + - audience: Node Dev + description: | + Enable supplying multiple backable candidates to the paras_inherent pallet for the same paraid. + +crates: + - name: polkadot-node-core-prospective-parachains + - name: polkadot-node-core-provisioner diff --git a/prdoc/pr_3377.prdoc b/prdoc/pr_3377.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..8e5b3935512b1b2dd12836e940a5d95040df8cf2 --- /dev/null +++ b/prdoc/pr_3377.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Permissioned contract deployment + +doc: + - audience: Runtime Dev + description: | + This PR introduces two new config types that specify the origins allowed to + upload and instantiate contract code. However, this check is not enforced when + a contract instantiates another contract. + +crates: +- name: pallet-contracts diff --git a/prdoc/pr_3378.prdoc b/prdoc/pr_3378.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..2470fc158519e31e297a7cbefcae3bc54eb8f708 --- /dev/null +++ b/prdoc/pr_3378.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Remove deprecated GenesisConfig + +doc: + - audience: Runtime Dev + description: | + Removes deprecated type `GenesisConfig`, it was replaced by `RuntimeGenesisConfig` on May 24 of 2023. 
+ The type `GenesisConfig` was deprecated on May 24 of 2023 [#14210](https://github.com/paritytech/substrate/pull/14210) + +crates: + - name: frame-support-procedural \ No newline at end of file diff --git a/prdoc/pr_3403.prdoc b/prdoc/pr_3403.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..086e769da3c39c4d3def42791c18a82948676386 --- /dev/null +++ b/prdoc/pr_3403.prdoc @@ -0,0 +1,22 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Add `claim_assets` extrinsic to `pallet-xcm`" + +doc: + - audience: Runtime User + description: | + There's a new extrinsic in `pallet-xcm` for claiming assets. + This means that if your assets ever get trapped while teleporting or doing reserve asset transfers, + you can easily claim them by calling this new extrinsic. + - audience: Runtime Dev + description: | + There's a new extrinsic in `pallet-xcm` that needs a new configuration item for its benchmarks. + It's a simple function in `pallet_xcm::benchmarking::Config`, `get_asset`, that returns a valid asset + handled by the AssetTransactor of the chain. + If you're already using `pallet-xcm-benchmarks`, then you already have this function there and can + just copy and paste it. + +crates: + - name: pallet-xcm + - name: staging-xcm diff --git a/prdoc/pr_3412.prdoc b/prdoc/pr_3412.prdoc index d68f05e45dcf438b93283b4cbc90fdf1d808cd7e..1ee6edfeb837f81646d32633ed009d2cded4dcfe 100644 --- a/prdoc/pr_3412.prdoc +++ b/prdoc/pr_3412.prdoc @@ -13,5 +13,5 @@ doc: crates: - name: pallet-babe - - name: pallet-aura-ext + - name: cumulus-pallet-aura-ext - name: pallet-session diff --git a/prdoc/pr_3454.prdoc b/prdoc/pr_3454.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..7f564258f23a57f3784c2ee055176153f006734f --- /dev/null +++ b/prdoc/pr_3454.prdoc @@ -0,0 +1,19 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Introduce storage attr macro #[disable_try_decode_storage] and set it on System::Events and ParachainSystem::HostConfiguration" + +doc: + - audience: Runtime Dev + description: | + Allows marking storage items with \#[disable_try_decode_storage], which disables that storage item from being decoded + during try_decode_entire_state calls. + + Applied the attribute to System::Events to close https://github.com/paritytech/polkadot-sdk/issues/2560. + Applied the attribute to ParachainSystem::HostConfiguration to resolve periodic issues with it. + +crates: + - name: frame-support-procedural + - name: frame-system + - name: cumulus-pallet-parachain-system + diff --git a/prdoc/pr_3456.prdoc b/prdoc/pr_3456.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..c7327e17e57d74614fd1b593cc222faf4a87192d --- /dev/null +++ b/prdoc/pr_3456.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removed `pallet::getter` usage from `pallet-collective` + +doc: + - audience: Runtime Dev + description: | + This PR removes `pallet::getter` usage from `pallet-collective`, and updates dependant code accordingly. + The syntax `StorageItem::::get()` should be used instead. 
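For the `pallet-collective` getter removal described just above, a minimal sketch of the call-site change, assuming the default pallet instance and its `Members` storage item (adjust the storage item and instance to whatever your code actually read through the removed getter):

```rust
// Sketch of replacing a removed `pallet::getter` call with a direct storage read.
fn collective_member_count<T: pallet_collective::Config>() -> usize {
	// Before: pallet_collective::Pallet::<T>::members().len()
	// After: read the storage item directly.
	pallet_collective::Members::<T>::get().len()
}
```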
+ +crates: + - name: pallet-collective + - name: kitchensink-runtime + - name: collectives-westend-runtime diff --git a/prdoc/pr_3460.prdoc b/prdoc/pr_3460.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..1f16fe0890836dd25bfcaa75d5fc647688230dd8 --- /dev/null +++ b/prdoc/pr_3460.prdoc @@ -0,0 +1,26 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Repot all templates + +doc: + - audience: Runtime Dev + description: | + This PR moves all templates into a single folder in the polkadot-sdk repo (`/templates`) and + unifies their crate names as well. Most notably, the crate name for what was formerly known + as `node-template` is now `solochain-template-node`. The other two crates in the template are + consequently called: `solochain-template-runtime` and `pallet-solochain-template`. + The other two template crate names follow a similar pattern, just replacing `solochain` with + `parachain` or `minimal`. + + This PR is part of a bigger step toward automating the template repositories; see the + following: https://github.com/paritytech/polkadot-sdk/issues/3155 + +# the following crates are removed and renamed, although none are released. +crates: + - name: minimal-template-runtime # formerly called minimal-runtime + - name: minimal-template-node # formerly called minimal-node + - name: solochain-template-node # formerly called node-template + - name: solochain-template-runtime # formerly called node-template-runtime + - name: parachain-template-runtime # formerly called parachain-runtime + - name: parachain-template-node # formerly called parachain-node diff --git a/prdoc/pr_3491.prdoc b/prdoc/pr_3491.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..e36afb916a6226c3609b1e2eb8aacf6af8127d32 --- /dev/null +++ b/prdoc/pr_3491.prdoc @@ -0,0 +1,12 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Remove Deprecated OldWeight + +doc: + - audience: Runtime Dev + description: | + Removed deprecated sp_weights::OldWeight type. Use [`weight_v2::Weight`] instead. + +crates: + - name: frame-support diff --git a/prdoc/pr_3504.prdoc b/prdoc/pr_3504.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..90a8ddd38b8f7f8159199ac6423ffee3dc645291 --- /dev/null +++ b/prdoc/pr_3504.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: add prometheus label "is_rate_limited" to rpc calls + +doc: + - audience: Node Operator + description: | + This PR adds a label "is_rate_limited" to the prometheus metrics "substrate_rpc_calls_time" and "substrate_rpc_calls_finished" + that can be used to distinguish rate-limited RPC calls from other RPC calls, because rate-limited RPC calls may take + tens of seconds.
+ +crates: [ ] diff --git a/prdoc/pr_3505.prdoc b/prdoc/pr_3505.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..08ad909739c7d761371e0e1e40d575794fc4e217 --- /dev/null +++ b/prdoc/pr_3505.prdoc @@ -0,0 +1,36 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removes `as [disambiguation_path]` from the required syntax in `derive_impl` + +doc: + - audience: Runtime Dev + description: | + This PR removes the need to specify `as [disambiguation_path]` for cases where the trait + definition resides within the same scope as default impl path. + + For example, in the following macro invocation + ```rust + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + ... + } + ``` + the trait `DefaultConfig` lies within the `frame_system` scope and `TestDefaultConfig` impls + the `DefaultConfig` trait. + + Using this information, we can compute the disambiguation path internally, thus removing the + need of an explicit specification: + ```rust + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] + impl frame_system::Config for Runtime { + ... + } + ``` + + In cases where the trait lies outside this scope, we would still need to specify it explicitly, + but this should take care of most (if not all) uses of `derive_impl` within FRAME's context. + +crates: + - name: frame-support-procedural + - name: pallet-default-config-example diff --git a/prdoc/pr_3510.prdoc b/prdoc/pr_3510.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..6ee2f9a81f750ef0c64a2b8d8197e1e44d7e9f0c --- /dev/null +++ b/prdoc/pr_3510.prdoc @@ -0,0 +1,13 @@ +title: "Fix multi-collator parachain transition to async backing" + +doc: + - audience: Node Operator + description: | + The dynamic Aura slot duration, introduced in PR#3211, didn't take the block import pipeline + into account. The result was the parachain backed by multiple collators not being able to + keep producing blocks after its runtime was upgraded to support async backing, requiring to + restart all the collator nodes. This change fixes the issue, introducing the dynamic Aura + slot duration into the block import pipeline. + +crates: + - name: "polkadot-parachain-bin" diff --git a/prdoc/pr_3513.prdoc b/prdoc/pr_3513.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..e1f2afe280a5bc2e8d341f9ab5ac73315f1676e8 --- /dev/null +++ b/prdoc/pr_3513.prdoc @@ -0,0 +1,18 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix call enum's metadata regression + +doc: + - audience: Runtime Dev + - audience: Runtime User + description: | + This PR fixes an issue where in the metadata of all FRAME-based chains, the documentation for + all extrinsic/call/transactions has been replaced by a single line saying "see Pallet::name". + + Many wallets display the metadata of transactions to the user before signing, and this bug + might have affected this process. 
+ +crates: + - name: frame + - name: frame-support diff --git a/prdoc/pr_3523.prdoc b/prdoc/pr_3523.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..c31d9d096dc00797254f7946fd44984636cb1547 --- /dev/null +++ b/prdoc/pr_3523.prdoc @@ -0,0 +1,19 @@ +title: Fix crash of synced parachain node run with `--sync=warp` + +doc: + - audience: Node Operator + description: | + Fix crash of `SyncingEngine` when an already synced parachain node is run with `--sync=warp` + (issue https://github.com/paritytech/polkadot-sdk/issues/3496). + The issue manifests itself by errors in the logs: + ``` + [Parachain] Cannot set warp sync target block: no warp sync strategy is active. + [Parachain] Failed to set warp sync target block header, terminating `SyncingEngine`. + ``` + Followed by a stream of messages: + ``` + [Parachain] Protocol command streams have been shut down + ``` + +crates: + - name: sc-network-sync diff --git a/prdoc/schema_user.json b/prdoc/schema_user.json index 82215d51866b35895b5e840a8f3a900b161a9cf6..1bd0b3b93ee45ea75d9781ef1485898dd86c7ff3 100644 --- a/prdoc/schema_user.json +++ b/prdoc/schema_user.json @@ -3,9 +3,8 @@ "$id": "https://raw.githubusercontent.com/paritytech/prdoc/master/prdoc_schema_user.json", "version": { "major": 1, - "minor": 0, - "patch": 0, - "timestamp": 20230817152351 + "minor": 1, + "patch": 0 }, "title": "Polkadot SDK PRDoc Schema", "description": "JSON Schema definition for the Polkadot SDK PR documentation", @@ -125,10 +124,16 @@ "name": { "type": "string" }, + "bump": { + "$ref": "#/$defs/semver_bump" + }, "note": { "type": "string" } - } + }, + "required": [ + "name" + ] }, "migration_db": { "type": "object", @@ -165,6 +170,26 @@ "description" ] }, + "semver_bump": { + "description": "The type of bump to apply to the crate version according to Cargo SemVer: https://doc.rust-lang.org/cargo/reference/semver.html. Please check docs/RELEASE.md for more information.", + "oneOf": [ + { + "const": "major", + "title": "Major", + "description": "A bump to the leftmost non-zero digit of the version number." + }, + { + "const": "minor", + "title": "Minor", + "description": "A bump to the second leftmost non-zero digit of the version number." + }, + { + "const": "patch", + "title": "Patch", + "description": "A bump to the third leftmost non-zero digit of the version number." + } + ] + }, "doc": { "type": "object", "description": "You have the the option to provide different description of your PR for different audiences.", diff --git a/substrate/.maintain/frame-weight-template.hbs b/substrate/.maintain/frame-weight-template.hbs index ecd384a514563cfb4ecb0e9f364cdf473f5b5cec..ec9eee205cee37b534eca5187ece031daa54dd99 100644 --- a/substrate/.maintain/frame-weight-template.hbs +++ b/substrate/.maintain/frame-weight-template.hbs @@ -33,7 +33,7 @@ pub trait WeightInfo { /// Weights for `{{pallet}}` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); -{{#if (eq pallet "frame_system")}} +{{#if (or (eq pallet "frame_system") (eq pallet "frame_system_extensions"))}} impl WeightInfo for SubstrateWeight { {{else}} impl WeightInfo for SubstrateWeight { diff --git a/substrate/bin/minimal/node/Cargo.toml b/substrate/bin/minimal/node/Cargo.toml deleted file mode 100644 index 65644861c9acaca95c6b152e7d237badbed89891..0000000000000000000000000000000000000000 --- a/substrate/bin/minimal/node/Cargo.toml +++ /dev/null @@ -1,60 +0,0 @@ -[package] -name = "minimal-node" -version = "4.0.0-dev" -description = "A fresh FRAME-based Substrate node, ready for hacking." -authors = ["Substrate DevHub "] -homepage = "https://substrate.io/" -edition = "2021" -license = "MIT-0" -publish = false -repository = "https://github.com/substrate-developer-hub/substrate-node-template/" -build = "build.rs" - -[lints] -workspace = true - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[[bin]] -name = "minimal-node" - -[dependencies] -clap = { version = "4.5.1", features = ["derive"] } -futures = { version = "0.3.21", features = ["thread-pool"] } -futures-timer = "3.0.1" -jsonrpsee = { version = "0.22", features = ["server"] } -serde_json = { workspace = true, default-features = true } - -sc-cli = { path = "../../../client/cli" } -sc-executor = { path = "../../../client/executor" } -sc-network = { path = "../../../client/network" } -sc-service = { path = "../../../client/service" } -sc-telemetry = { path = "../../../client/telemetry" } -sc-transaction-pool = { path = "../../../client/transaction-pool" } -sc-transaction-pool-api = { path = "../../../client/transaction-pool/api" } -sc-consensus = { path = "../../../client/consensus/common" } -sc-consensus-manual-seal = { path = "../../../client/consensus/manual-seal" } -sc-rpc-api = { path = "../../../client/rpc-api" } -sc-basic-authorship = { path = "../../../client/basic-authorship" } -sc-offchain = { path = "../../../client/offchain" } -sc-client-api = { path = "../../../client/api" } - -sp-timestamp = { path = "../../../primitives/timestamp" } -sp-keyring = { path = "../../../primitives/keyring" } -sp-api = { path = "../../../primitives/api" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-block-builder = { path = "../../../primitives/block-builder" } -sp-io = { path = "../../../primitives/io" } -sp-runtime = { path = "../../../primitives/runtime" } - -substrate-frame-rpc-system = { path = "../../../utils/frame/rpc/system" } - -frame = { path = "../../../frame", features = ["experimental", "runtime"] } -runtime = { package = "minimal-runtime", path = "../runtime" } - -[build-dependencies] -substrate-build-script-utils = { path = "../../../utils/build-script-utils" } - -[features] -default = [] diff --git a/substrate/bin/minimal/runtime/Cargo.toml b/substrate/bin/minimal/runtime/Cargo.toml deleted file mode 100644 index 296106544bbfdbbb1f7f76b86d5c8ddefb54f917..0000000000000000000000000000000000000000 --- a/substrate/bin/minimal/runtime/Cargo.toml +++ /dev/null @@ -1,50 +0,0 @@ -[package] -name = "minimal-runtime" -version = "0.1.0" -authors.workspace = true -description = "A minimal Substrate example runtime" -edition.workspace = true -repository.workspace = true -license.workspace = true -publish = false - -[lints] -workspace = true - -[dependencies] -parity-scale-codec = { version = "3.0.0", default-features = false } -scale-info = { version = "2.6.0", default-features = false } - -# this is a frame-based runtime, thus importing `frame` 
with runtime feature enabled. -frame = { path = "../../../frame", default-features = false, features = ["experimental", "runtime"] } -frame-support = { path = "../../../frame/support", default-features = false } - -# pallets that we want to use -pallet-balances = { path = "../../../frame/balances", default-features = false } -pallet-sudo = { path = "../../../frame/sudo", default-features = false } -pallet-timestamp = { path = "../../../frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../frame/transaction-payment/rpc/runtime-api", default-features = false } - -# genesis builder that allows us to interacto with runtime genesis config -sp-genesis-builder = { path = "../../../primitives/genesis-builder", default-features = false } - - -[build-dependencies] -substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } - -[features] -default = ["std"] -std = [ - "frame-support/std", - "frame/std", - "pallet-balances/std", - "pallet-sudo/std", - "pallet-timestamp/std", - "pallet-transaction-payment-rpc-runtime-api/std", - "pallet-transaction-payment/std", - "parity-scale-codec/std", - "scale-info/std", - "sp-genesis-builder/std", - "substrate-wasm-builder", -] diff --git a/substrate/bin/node-template/.editorconfig b/substrate/bin/node-template/.editorconfig deleted file mode 100644 index 5adac74ca24b3422222b2cb886b2a50cd22b6d1b..0000000000000000000000000000000000000000 --- a/substrate/bin/node-template/.editorconfig +++ /dev/null @@ -1,16 +0,0 @@ -root = true - -[*] -indent_style=space -indent_size=2 -tab_width=2 -end_of_line=lf -charset=utf-8 -trim_trailing_whitespace=true -insert_final_newline = true - -[*.{rs,toml}] -indent_style=tab -indent_size=tab -tab_width=4 -max_line_length=100 diff --git a/substrate/bin/node-template/env-setup/.envrc b/substrate/bin/node-template/env-setup/.envrc deleted file mode 100644 index 3550a30f2de389e537ee40ca5e64a77dc185c79b..0000000000000000000000000000000000000000 --- a/substrate/bin/node-template/env-setup/.envrc +++ /dev/null @@ -1 +0,0 @@ -use flake diff --git a/substrate/bin/node-template/node/Cargo.toml b/substrate/bin/node-template/node/Cargo.toml deleted file mode 100644 index 9cba2b5a36602cae3687b8d5afaa71510cd0d926..0000000000000000000000000000000000000000 --- a/substrate/bin/node-template/node/Cargo.toml +++ /dev/null @@ -1,92 +0,0 @@ -[package] -name = "node-template" -version = "4.0.0-dev" -description = "A fresh FRAME-based Substrate node, ready for hacking." 
-authors = ["Substrate DevHub "] -homepage = "https://substrate.io/" -edition.workspace = true -license = "MIT-0" -publish = false -repository = "https://github.com/substrate-developer-hub/substrate-node-template/" -build = "build.rs" - -[lints] -workspace = true - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[[bin]] -name = "node-template" - -[dependencies] -clap = { version = "4.5.1", features = ["derive"] } -futures = { version = "0.3.21", features = ["thread-pool"] } -serde_json = { workspace = true, default-features = true } - -sc-cli = { path = "../../../client/cli" } -sp-core = { path = "../../../primitives/core" } -sc-executor = { path = "../../../client/executor" } -sc-network = { path = "../../../client/network" } -sc-service = { path = "../../../client/service" } -sc-telemetry = { path = "../../../client/telemetry" } -sc-transaction-pool = { path = "../../../client/transaction-pool" } -sc-transaction-pool-api = { path = "../../../client/transaction-pool/api" } -sc-offchain = { path = "../../../client/offchain" } -sc-consensus-aura = { path = "../../../client/consensus/aura" } -sp-consensus-aura = { path = "../../../primitives/consensus/aura" } -sc-consensus = { path = "../../../client/consensus/common" } -sc-consensus-grandpa = { path = "../../../client/consensus/grandpa" } -sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa" } -sc-client-api = { path = "../../../client/api" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-io = { path = "../../../primitives/io" } -sp-timestamp = { path = "../../../primitives/timestamp" } -sp-inherents = { path = "../../../primitives/inherents" } -sp-keyring = { path = "../../../primitives/keyring" } -frame-system = { path = "../../../frame/system" } -pallet-transaction-payment = { path = "../../../frame/transaction-payment", default-features = false } - -# These dependencies are used for the node template's RPCs -jsonrpsee = { version = "0.22", features = ["server"] } -sp-api = { path = "../../../primitives/api" } -sc-rpc-api = { path = "../../../client/rpc-api" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-block-builder = { path = "../../../primitives/block-builder" } -sc-basic-authorship = { path = "../../../client/basic-authorship" } -substrate-frame-rpc-system = { path = "../../../utils/frame/rpc/system" } -pallet-transaction-payment-rpc = { path = "../../../frame/transaction-payment/rpc" } - -# These dependencies are used for runtime benchmarking -frame-benchmarking = { path = "../../../frame/benchmarking" } -frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli" } - -# Local Dependencies -node-template-runtime = { path = "../runtime" } - -# CLI-specific dependencies -try-runtime-cli = { path = "../../../utils/frame/try-runtime/cli", optional = true } - -[build-dependencies] -substrate-build-script-utils = { path = "../../../utils/build-script-utils" } - -[features] -default = [] -# Dependencies that are only required if runtime benchmarking should be build. -runtime-benchmarks = [ - "frame-benchmarking-cli/runtime-benchmarks", - "frame-benchmarking/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "node-template-runtime/runtime-benchmarks", - "sc-service/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", -] -# Enable features that allow the runtime to be tried and debugged. Name might be subject to change -# in the near future. 
-try-runtime = [ - "frame-system/try-runtime", - "node-template-runtime/try-runtime", - "pallet-transaction-payment/try-runtime", - "sp-runtime/try-runtime", - "try-runtime-cli/try-runtime", -] diff --git a/substrate/bin/node-template/runtime/Cargo.toml b/substrate/bin/node-template/runtime/Cargo.toml deleted file mode 100644 index 86bab0ca375a84cad0fc9ac2faf5fbc105c0b001..0000000000000000000000000000000000000000 --- a/substrate/bin/node-template/runtime/Cargo.toml +++ /dev/null @@ -1,125 +0,0 @@ -[package] -name = "node-template-runtime" -version = "4.0.0-dev" -description = "A fresh FRAME-based Substrate node, ready for hacking." -authors = ["Substrate DevHub "] -homepage = "https://substrate.io/" -edition.workspace = true -license = "MIT-0" -publish = false -repository = "https://github.com/substrate-developer-hub/substrate-node-template/" - -[lints] -workspace = true - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } - -pallet-aura = { path = "../../../frame/aura", default-features = false } -pallet-balances = { path = "../../../frame/balances", default-features = false } -frame-support = { path = "../../../frame/support", default-features = false } -pallet-grandpa = { path = "../../../frame/grandpa", default-features = false } -pallet-sudo = { path = "../../../frame/sudo", default-features = false } -frame-system = { path = "../../../frame/system", default-features = false } -frame-try-runtime = { path = "../../../frame/try-runtime", default-features = false, optional = true } -pallet-timestamp = { path = "../../../frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../frame/transaction-payment", default-features = false } -frame-executive = { path = "../../../frame/executive", default-features = false } -sp-api = { path = "../../../primitives/api", default-features = false } -sp-block-builder = { path = "../../../primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../primitives/consensus/aura", default-features = false, features = ["serde"] } -sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa", default-features = false, features = ["serde"] } -sp-core = { path = "../../../primitives/core", default-features = false, features = ["serde"] } -sp-inherents = { path = "../../../primitives/inherents", default-features = false } -sp-offchain = { path = "../../../primitives/offchain", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false, features = ["serde"] } -sp-session = { path = "../../../primitives/session", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } -sp-storage = { path = "../../../primitives/storage", default-features = false } -sp-transaction-pool = { path = "../../../primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../primitives/version", default-features = false, features = ["serde"] } -serde_json = { features = ["alloc"], workspace = true } -sp-genesis-builder = { default-features = false, path = "../../../primitives/genesis-builder" } - -# Used for the node template's RPCs -frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api", default-features = false } 
-pallet-transaction-payment-rpc-runtime-api = { path = "../../../frame/transaction-payment/rpc/runtime-api", default-features = false } - -# Used for runtime benchmarking -frame-benchmarking = { path = "../../../frame/benchmarking", default-features = false, optional = true } -frame-system-benchmarking = { path = "../../../frame/system/benchmarking", default-features = false, optional = true } - -# Local Dependencies -pallet-template = { path = "../pallets/template", default-features = false } - -[build-dependencies] -substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } - -[features] -default = ["std"] -std = [ - "codec/std", - "frame-benchmarking?/std", - "frame-executive/std", - "frame-support/std", - "frame-system-benchmarking?/std", - "frame-system-rpc-runtime-api/std", - "frame-system/std", - "frame-try-runtime?/std", - "pallet-aura/std", - "pallet-balances/std", - "pallet-grandpa/std", - "pallet-sudo/std", - "pallet-template/std", - "pallet-timestamp/std", - "pallet-transaction-payment-rpc-runtime-api/std", - "pallet-transaction-payment/std", - "scale-info/std", - "serde_json/std", - "sp-api/std", - "sp-block-builder/std", - "sp-consensus-aura/std", - "sp-consensus-grandpa/std", - "sp-core/std", - "sp-genesis-builder/std", - "sp-inherents/std", - "sp-offchain/std", - "sp-runtime/std", - "sp-session/std", - "sp-std/std", - "sp-storage/std", - "sp-transaction-pool/std", - "sp-version/std", - "substrate-wasm-builder", -] -runtime-benchmarks = [ - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system-benchmarking/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "pallet-balances/runtime-benchmarks", - "pallet-grandpa/runtime-benchmarks", - "pallet-sudo/runtime-benchmarks", - "pallet-template/runtime-benchmarks", - "pallet-timestamp/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", -] -try-runtime = [ - "frame-executive/try-runtime", - "frame-support/try-runtime", - "frame-system/try-runtime", - "frame-try-runtime/try-runtime", - "pallet-aura/try-runtime", - "pallet-balances/try-runtime", - "pallet-grandpa/try-runtime", - "pallet-sudo/try-runtime", - "pallet-template/try-runtime", - "pallet-timestamp/try-runtime", - "pallet-transaction-payment/try-runtime", - "sp-runtime/try-runtime", -] -experimental = ["pallet-aura/experimental"] diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index abe284c41da1f21c064cebcf05ee23c9f4f540f0..c91a83e6d6eab210a7a54a0eb4cc156c1ad79e23 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -196,6 +196,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "kitchensink-runtime/runtime-benchmarks", "node-inspect?/runtime-benchmarks", + "pallet-asset-conversion-tx-payment/runtime-benchmarks", "pallet-asset-tx-payment/runtime-benchmarks", "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", @@ -205,6 +206,7 @@ runtime-benchmarks = [ "pallet-skip-feeless-payment/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "sc-client-db/runtime-benchmarks", "sc-service/runtime-benchmarks", diff --git a/substrate/bin/node/cli/benches/block_production.rs b/substrate/bin/node/cli/benches/block_production.rs index 23a62cc0bd243ebd1c8ade712b3ded2361f9e4c0..b98a321c338e93e6fb773cecf67d13a978d1a7bd 100644 --- a/substrate/bin/node/cli/benches/block_production.rs +++ 
b/substrate/bin/node/cli/benches/block_production.rs @@ -110,7 +110,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { fn extrinsic_set_time(now: u64) -> OpaqueExtrinsic { kitchensink_runtime::UncheckedExtrinsic { - signature: None, + preamble: sp_runtime::generic::Preamble::Bare, function: kitchensink_runtime::RuntimeCall::Timestamp(pallet_timestamp::Call::set { now }), } .into() diff --git a/substrate/bin/node/cli/benches/executor.rs b/substrate/bin/node/cli/benches/executor.rs index a326e1a79ea347f169e372581d07dc4f43848e24..e13d34f9657acea48f0b53aa021296e9897f4780 100644 --- a/substrate/bin/node/cli/benches/executor.rs +++ b/substrate/bin/node/cli/benches/executor.rs @@ -29,7 +29,7 @@ use sp_core::{ storage::well_known_keys, traits::{CallContext, CodeExecutor, RuntimeCode}, }; -use sp_runtime::traits::BlakeTwo256; +use sp_runtime::{generic::ExtrinsicFormat, traits::BlakeTwo256}; use sp_state_machine::TestExternalities as CoreTestExternalities; use staging_node_cli::service::RuntimeExecutor; @@ -144,11 +144,11 @@ fn test_blocks( ) -> Vec<(Vec, Hash)> { let mut test_ext = new_test_ext(genesis_config); let mut block1_extrinsics = vec![CheckedExtrinsic { - signed: None, + format: ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: 0 }), }]; block1_extrinsics.extend((0..20).map(|i| CheckedExtrinsic { - signed: Some((alice(), signed_extra(i, 0))), + format: ExtrinsicFormat::Signed(alice(), tx_ext(i, 0)), function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: bob().into(), value: 1 * DOLLARS, diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index 8f2aba6b44cd0a980771ec7d84eb383421551a6b..157f278fb5345526727f812b3bc5efec5d782000 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -107,18 +107,21 @@ pub fn create_extrinsic( .map(|c| c / 2) .unwrap_or(2) as u64; let tip = 0; - let extra: kitchensink_runtime::SignedExtra = + let tx_ext: kitchensink_runtime::TxExtension = ( - frame_system::CheckNonZeroSender::::new(), - frame_system::CheckSpecVersion::::new(), - frame_system::CheckTxVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckEra::::from(generic::Era::mortal( - period, - best_block.saturated_into(), - )), - frame_system::CheckNonce::::from(nonce), - frame_system::CheckWeight::::new(), + ( + frame_system::CheckNonZeroSender::::new(), + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckEra::::from(generic::Era::mortal( + period, + best_block.saturated_into(), + )), + frame_system::CheckNonce::::from(nonce), + frame_system::CheckWeight::::new(), + ) + .into(), pallet_skip_feeless_payment::SkipCheckIfFeeless::from( pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::< kitchensink_runtime::Runtime, @@ -128,15 +131,17 @@ pub fn create_extrinsic( let raw_payload = kitchensink_runtime::SignedPayload::from_raw( function.clone(), - extra.clone(), + tx_ext.clone(), ( - (), - kitchensink_runtime::VERSION.spec_version, - kitchensink_runtime::VERSION.transaction_version, - genesis_hash, - best_hash, - (), - (), + ( + (), + kitchensink_runtime::VERSION.spec_version, + kitchensink_runtime::VERSION.transaction_version, + genesis_hash, + best_hash, + (), + (), + ), (), ), ); @@ -146,7 +151,7 @@ pub fn create_extrinsic( function, sp_runtime::AccountId32::from(sender.public()).into(), 
kitchensink_runtime::Signature::Sr25519(signature), - extra, + tx_ext, ) } @@ -791,7 +796,7 @@ mod tests { use codec::Encode; use kitchensink_runtime::{ constants::{currency::CENTS, time::SLOT_DURATION}, - Address, BalancesCall, RuntimeCall, UncheckedExtrinsic, + Address, BalancesCall, RuntimeCall, TxExtension, UncheckedExtrinsic, }; use node_primitives::{Block, DigestItem, Signature}; use sc_client_api::BlockBackend; @@ -993,25 +998,31 @@ mod tests { let tx_payment = pallet_skip_feeless_payment::SkipCheckIfFeeless::from( pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(0, None), ); - let extra = ( - check_non_zero_sender, - check_spec_version, - check_tx_version, - check_genesis, - check_era, - check_nonce, - check_weight, + let tx_ext: TxExtension = ( + ( + check_non_zero_sender, + check_spec_version, + check_tx_version, + check_genesis, + check_era, + check_nonce, + check_weight, + ) + .into(), tx_payment, ); let raw_payload = SignedPayload::from_raw( function, - extra, - ((), spec_version, transaction_version, genesis_hash, genesis_hash, (), (), ()), + tx_ext, + ( + ((), spec_version, transaction_version, genesis_hash, genesis_hash, (), ()), + (), + ), ); let signature = raw_payload.using_encoded(|payload| signer.sign(payload)); - let (function, extra, _) = raw_payload.deconstruct(); + let (function, tx_ext, _) = raw_payload.deconstruct(); index += 1; - UncheckedExtrinsic::new_signed(function, from.into(), signature.into(), extra) + UncheckedExtrinsic::new_signed(function, from.into(), signature.into(), tx_ext) .into() }, ); diff --git a/substrate/bin/node/cli/tests/basic.rs b/substrate/bin/node/cli/tests/basic.rs index 525ab2e39c1287edcf235fe3ac6381e3f0fc8e10..5fcb2295ef009adaeb84017534cf2965917405b9 100644 --- a/substrate/bin/node/cli/tests/basic.rs +++ b/substrate/bin/node/cli/tests/basic.rs @@ -67,7 +67,7 @@ fn transfer_fee(extrinsic: &E) -> Balance { fn xt() -> UncheckedExtrinsic { sign(CheckedExtrinsic { - signed: Some((alice(), signed_extra(0, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(0, 0)), function: RuntimeCall::Balances(default_transfer_call()), }) } @@ -84,11 +84,11 @@ fn changes_trie_block() -> (Vec, Hash) { GENESIS_HASH.into(), vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time }), }, CheckedExtrinsic { - signed: Some((alice(), signed_extra(0, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(0, 0)), function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: bob().into(), value: 69 * DOLLARS, @@ -111,11 +111,11 @@ fn blocks() -> ((Vec, Hash), (Vec, Hash)) { GENESIS_HASH.into(), vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time1 }), }, CheckedExtrinsic { - signed: Some((alice(), signed_extra(0, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(0, 0)), function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: bob().into(), value: 69 * DOLLARS, @@ -131,18 +131,18 @@ fn blocks() -> ((Vec, Hash), (Vec, Hash)) { block1.1, vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time2 }), }, CheckedExtrinsic { - signed: Some((bob(), signed_extra(0, 0))), + format: 
sp_runtime::generic::ExtrinsicFormat::Signed(bob(), tx_ext(0, 0)), function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: alice().into(), value: 5 * DOLLARS, }), }, CheckedExtrinsic { - signed: Some((alice(), signed_extra(1, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(1, 0)), function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: bob().into(), value: 15 * DOLLARS, @@ -166,11 +166,11 @@ fn block_with_size(time: u64, nonce: u32, size: usize) -> (Vec, Hash) { GENESIS_HASH.into(), vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time * 1000 }), }, CheckedExtrinsic { - signed: Some((alice(), signed_extra(nonce, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(nonce, 0)), function: RuntimeCall::System(frame_system::Call::remark { remark: vec![0; size] }), }, ], @@ -677,11 +677,11 @@ fn deploying_wasm_contract_should_work() { GENESIS_HASH.into(), vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time }), }, CheckedExtrinsic { - signed: Some((charlie(), signed_extra(0, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(charlie(), tx_ext(0, 0)), function: RuntimeCall::Contracts(pallet_contracts::Call::instantiate_with_code::< Runtime, > { @@ -694,7 +694,7 @@ fn deploying_wasm_contract_should_work() { }), }, CheckedExtrinsic { - signed: Some((charlie(), signed_extra(1, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(charlie(), tx_ext(1, 0)), function: RuntimeCall::Contracts(pallet_contracts::Call::call:: { dest: sp_runtime::MultiAddress::Id(addr.clone()), value: 10, diff --git a/substrate/bin/node/cli/tests/fees.rs b/substrate/bin/node/cli/tests/fees.rs index 8c7b3c873157770a8f156a019aa7e43e66460bae..9d6407067a37a51791a9d74ed987d1928926442e 100644 --- a/substrate/bin/node/cli/tests/fees.rs +++ b/substrate/bin/node/cli/tests/fees.rs @@ -54,11 +54,11 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { GENESIS_HASH.into(), vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time1 }), }, CheckedExtrinsic { - signed: Some((charlie(), signed_extra(0, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(charlie(), tx_ext(0, 0)), function: RuntimeCall::Sudo(pallet_sudo::Call::sudo { call: Box::new(RuntimeCall::RootTesting( pallet_root_testing::Call::fill_block { ratio: Perbill::from_percent(60) }, @@ -77,11 +77,11 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { block1.1, vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time2 }), }, CheckedExtrinsic { - signed: Some((charlie(), signed_extra(1, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(charlie(), tx_ext(1, 0)), function: RuntimeCall::System(frame_system::Call::remark { remark: vec![0; 1] }), }, ], @@ -147,7 +147,7 @@ fn transaction_fee_is_correct() { let tip = 1_000_000; let xt = sign(CheckedExtrinsic { - signed: Some((alice(), signed_extra(0, tip))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(0, tip)), function: RuntimeCall::Balances(default_transfer_call()), }); @@ -211,7 +211,10 @@ fn 
block_weight_capacity_report() { let num_transfers = block_number * factor; let mut xts = (0..num_transfers) .map(|i| CheckedExtrinsic { - signed: Some((charlie(), signed_extra(nonce + i as Nonce, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed( + charlie(), + tx_ext(nonce + i as Nonce, 0), + ), function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: bob().into(), value: 0, @@ -222,7 +225,7 @@ fn block_weight_capacity_report() { xts.insert( 0, CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time * 1000 }), }, ); @@ -285,13 +288,16 @@ fn block_length_capacity_report() { previous_hash, vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time * 1000, }), }, CheckedExtrinsic { - signed: Some((charlie(), signed_extra(nonce, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed( + charlie(), + tx_ext(nonce, 0), + ), function: RuntimeCall::System(frame_system::Call::remark { remark: vec![0u8; (block_number * factor) as usize], }), diff --git a/substrate/bin/node/cli/tests/submit_transaction.rs b/substrate/bin/node/cli/tests/submit_transaction.rs index 5cbb0103d471b96902bb341bcd796e6cf09eac76..f3a5bac8fb52bd06d49703faeeaa18ff724a977b 100644 --- a/substrate/bin/node/cli/tests/submit_transaction.rs +++ b/substrate/bin/node/cli/tests/submit_transaction.rs @@ -130,8 +130,8 @@ fn should_submit_signed_twice_from_the_same_account() { // now check that the transaction nonces are not equal let s = state.read(); fn nonce(tx: UncheckedExtrinsic) -> frame_system::CheckNonce { - let extra = tx.signature.unwrap().2; - extra.5 + let extra = tx.preamble.to_signed().unwrap().2; + (extra.0).5 } let nonce1 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[0]).unwrap()); let nonce2 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[1]).unwrap()); @@ -179,8 +179,8 @@ fn should_submit_signed_twice_from_all_accounts() { // now check that the transaction nonces are not equal let s = state.read(); fn nonce(tx: UncheckedExtrinsic) -> frame_system::CheckNonce { - let extra = tx.signature.unwrap().2; - extra.5 + let extra = tx.preamble.to_signed().unwrap().2; + (extra.0).5 } let nonce1 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[0]).unwrap()); let nonce2 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[1]).unwrap()); @@ -236,7 +236,7 @@ fn submitted_transaction_should_be_valid() { let source = TransactionSource::External; let extrinsic = UncheckedExtrinsic::decode(&mut &*tx0).unwrap(); // add balance to the account - let author = extrinsic.signature.clone().unwrap().0; + let author = extrinsic.preamble.clone().to_signed().clone().unwrap().0; let address = Indices::lookup(author).unwrap(); let data = pallet_balances::AccountData { free: 5_000_000_000_000, ..Default::default() }; let account = frame_system::AccountInfo { providers: 1, data, ..Default::default() }; diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml index aa13c3d292be6af6d6dba12e5b043a9069eab9a3..bdef42474d09b7cb23d53b7574e8a4186c0429c1 100644 --- a/substrate/bin/node/runtime/Cargo.toml +++ b/substrate/bin/node/runtime/Cargo.toml @@ -88,6 +88,7 @@ pallet-election-provider-support-benchmarking = { path = "../../../frame/electio pallet-elections-phragmen = { path = "../../../frame/elections-phragmen", default-features = false } 
pallet-example-tasks = { path = "../../../frame/examples/tasks", default-features = false } pallet-fast-unstake = { path = "../../../frame/fast-unstake", default-features = false } +pallet-migrations = { path = "../../../frame/migrations", default-features = false } pallet-nis = { path = "../../../frame/nis", default-features = false } pallet-grandpa = { path = "../../../frame/grandpa", default-features = false } pallet-im-online = { path = "../../../frame/im-online", default-features = false } @@ -198,6 +199,7 @@ std = [ "pallet-lottery/std", "pallet-membership/std", "pallet-message-queue/std", + "pallet-migrations/std", "pallet-mixnet/std", "pallet-mmr/std", "pallet-multisig/std", @@ -274,6 +276,7 @@ runtime-benchmarks = [ "frame-system-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-alliance/runtime-benchmarks", + "pallet-asset-conversion-tx-payment/runtime-benchmarks", "pallet-asset-conversion/runtime-benchmarks", "pallet-asset-rate/runtime-benchmarks", "pallet-asset-tx-payment/runtime-benchmarks", @@ -302,6 +305,7 @@ runtime-benchmarks = [ "pallet-lottery/runtime-benchmarks", "pallet-membership/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", + "pallet-migrations/runtime-benchmarks", "pallet-mixnet/runtime-benchmarks", "pallet-mmr/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", @@ -330,6 +334,7 @@ runtime-benchmarks = [ "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-tips/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-transaction-storage/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "pallet-tx-pause/runtime-benchmarks", @@ -381,6 +386,7 @@ try-runtime = [ "pallet-lottery/try-runtime", "pallet-membership/try-runtime", "pallet-message-queue/try-runtime", + "pallet-migrations/try-runtime", "pallet-mixnet/try-runtime", "pallet-mmr/try-runtime", "pallet-multisig/try-runtime", diff --git a/substrate/bin/node/runtime/src/impls.rs b/substrate/bin/node/runtime/src/impls.rs index 7ff52a758b3dd756dc4536df5928d3b2bb68a148..34f043b33a4edfe2d8cdbe154896e937826110ca 100644 --- a/substrate/bin/node/runtime/src/impls.rs +++ b/substrate/bin/node/runtime/src/impls.rs @@ -30,8 +30,8 @@ use pallet_identity::legacy::IdentityField; use sp_std::prelude::*; use crate::{ - AccountId, AllianceMotion, Assets, Authorship, Balances, Hash, NegativeImbalance, Runtime, - RuntimeCall, + AccountId, AllianceCollective, AllianceMotion, Assets, Authorship, Balances, Hash, + NegativeImbalance, Runtime, RuntimeCall, }; pub struct Author; @@ -107,7 +107,7 @@ impl ProposalProvider for AllianceProposalProvider } fn proposal_of(proposal_hash: Hash) -> Option { - AllianceMotion::proposal_of(proposal_hash) + pallet_collective::ProposalOf::::get(proposal_hash) } } @@ -276,7 +276,7 @@ mod multiplier_tests { let next = runtime_multiplier_update(fm); fm = next; if fm == min_multiplier() { - break + break; } iterations += 1; } diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index b34d8c89e568ab78622010ea1c7aac35126bd2cd..18ce13cff8015ea83f25703888837283ce270281 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -310,6 +310,7 @@ impl frame_system::Config for Runtime { type SystemWeightInfo = frame_system::weights::SubstrateWeight; type SS58Prefix = ConstU16<42>; type MaxConsumers = ConstU32<16>; + type MultiBlockMigrator = MultiBlockMigrations; } impl pallet_insecure_randomness_collective_flip::Config for Runtime {} 
@@ -559,6 +560,7 @@ impl pallet_transaction_payment::Config for Runtime { MinimumMultiplier, MaximumMultiplier, >; + type WeightInfo = pallet_transaction_payment::weights::SubstrateWeight; } impl pallet_asset_tx_payment::Config for Runtime { @@ -568,6 +570,9 @@ impl pallet_asset_tx_payment::Config for Runtime { pallet_assets::BalanceToAssetBalance, CreditToBlockAuthor, >; + type WeightInfo = pallet_asset_tx_payment::weights::SubstrateWeight; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = AssetTxHelper; } impl pallet_asset_conversion_tx_payment::Config for Runtime { @@ -578,6 +583,9 @@ impl pallet_asset_conversion_tx_payment::Config for Runtime { AssetConversion, Native, >; + type WeightInfo = pallet_asset_conversion_tx_payment::weights::SubstrateWeight; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = AssetConversionTxHelper; } impl pallet_skip_feeless_payment::Config for Runtime { @@ -1357,6 +1365,8 @@ impl pallet_contracts::Config for Runtime { type MaxCodeLen = ConstU32<{ 123 * 1024 }>; type MaxStorageKeyLen = ConstU32<128>; type UnsafeUnstableInterface = ConstBool; + type UploadOrigin = EnsureSigned; + type InstantiateOrigin = EnsureSigned; type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; type RuntimeHoldReason = RuntimeHoldReason; #[cfg(not(feature = "runtime-benchmarks"))] @@ -1406,29 +1416,33 @@ where // so the actual block number is `n`. .saturating_sub(1); let era = Era::mortal(period, current_block); - let extra = ( - frame_system::CheckNonZeroSender::::new(), - frame_system::CheckSpecVersion::::new(), - frame_system::CheckTxVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckEra::::from(era), - frame_system::CheckNonce::::from(nonce), - frame_system::CheckWeight::::new(), + let tx_ext: TxExtension = ( + ( + frame_system::CheckNonZeroSender::::new(), + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckEra::::from(era), + frame_system::CheckNonce::::from(nonce), + frame_system::CheckWeight::::new(), + ) + .into(), pallet_skip_feeless_payment::SkipCheckIfFeeless::from( pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::from( tip, None, ), ), ); - let raw_payload = SignedPayload::new(call, extra) + + let raw_payload = SignedPayload::new(call, tx_ext) .map_err(|e| { log::warn!("Unable to create signed payload: {:?}", e); }) .ok()?; let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?; let address = Indices::unlookup(account); - let (call, extra, _) = raw_payload.deconstruct(); - Some((call, (address, signature, extra))) + let (call, tx_ext, _) = raw_payload.deconstruct(); + Some((call, (address, signature, tx_ext))) } } @@ -2007,6 +2021,25 @@ impl pallet_statement::Config for Runtime { type MaxAllowedBytes = MaxAllowedBytes; } +parameter_types! { + pub MbmServiceWeight: Weight = Perbill::from_percent(80) * RuntimeBlockWeights::get().max_block; +} + +impl pallet_migrations::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + #[cfg(not(feature = "runtime-benchmarks"))] + type Migrations = (); + // Benchmarks need mocked migrations to guarantee that they succeed. 
+ #[cfg(feature = "runtime-benchmarks")] + type Migrations = pallet_migrations::mock_helpers::MockedMigrations; + type CursorMaxLen = ConstU32<65_536>; + type IdentifierMaxLen = ConstU32<256>; + type MigrationStatusHandler = (); + type FailedMigrationHandler = frame_support::migrations::FreezeChainOnFailedMigration; + type MaxServiceWeight = MbmServiceWeight; + type WeightInfo = pallet_migrations::weights::SubstrateWeight; +} + parameter_types! { pub const BrokerPalletId: PalletId = PalletId(*b"py/broke"); } @@ -2241,6 +2274,7 @@ construct_runtime!( TxPause: pallet_tx_pause, SafeMode: pallet_safe_mode, Statement: pallet_statement, + MultiBlockMigrations: pallet_migrations, Broker: pallet_broker, TasksExample: pallet_example_tasks, Mixnet: pallet_mixnet, @@ -2259,19 +2293,21 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. +/// The TransactionExtension to the basic transaction logic. /// /// When you change this, you **MUST** modify [`sign`] in `bin/node/testing/src/keyring.rs`! /// /// [`sign`]: <../../testing/src/keyring.rs.html> -pub type SignedExtra = ( - frame_system::CheckNonZeroSender, - frame_system::CheckSpecVersion, - frame_system::CheckTxVersion, - frame_system::CheckGenesis, - frame_system::CheckEra, - frame_system::CheckNonce, - frame_system::CheckWeight, +pub type TxExtension = ( + ( + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + ), pallet_skip_feeless_payment::SkipCheckIfFeeless< Runtime, pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, @@ -2280,11 +2316,11 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; +pub type SignedPayload = generic::SignedPayload; /// Extrinsic type that has already been checked. -pub type CheckedExtrinsic = generic::CheckedExtrinsic; +pub type CheckedExtrinsic = generic::CheckedExtrinsic; /// Executive: handles dispatch to the various modules. 
pub type Executive = frame_executive::Executive< Runtime, @@ -2339,6 +2375,106 @@ mod mmr { pub type Hashing = ::Hashing; } +#[cfg(feature = "runtime-benchmarks")] +pub struct AssetConversionTxHelper; + +#[cfg(feature = "runtime-benchmarks")] +impl pallet_asset_conversion_tx_payment::BenchmarkHelperTrait + for AssetConversionTxHelper +{ + fn create_asset_id_parameter(seed: u32) -> (u32, u32) { + (seed, seed) + } + + fn setup_balances_and_pool(asset_id: u32, account: AccountId) { + use frame_support::{assert_ok, traits::fungibles::Mutate}; + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + asset_id.into(), + account.clone().into(), /* owner */ + true, /* is_sufficient */ + 1, + )); + + let lp_provider = account.clone(); + let _ = Balances::deposit_creating(&lp_provider, ((u64::MAX as u128) * 100).into()); + assert_ok!(Assets::mint_into( + asset_id.into(), + &lp_provider, + ((u64::MAX as u128) * 100).into() + )); + + let token_native = Box::new(NativeOrWithId::Native); + let token_second = Box::new(NativeOrWithId::WithId(asset_id)); + + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(lp_provider.clone()), + token_native.clone(), + token_second.clone() + )); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(lp_provider.clone()), + token_native, + token_second, + u64::MAX.into(), // 1 desired + u64::MAX.into(), // 2 desired + 1, // 1 min + 1, // 2 min + lp_provider, + )); + } +} + +#[cfg(feature = "runtime-benchmarks")] +pub struct AssetTxHelper; + +#[cfg(feature = "runtime-benchmarks")] +impl pallet_asset_tx_payment::BenchmarkHelperTrait for AssetTxHelper { + fn create_asset_id_parameter(seed: u32) -> (u32, u32) { + (seed, seed) + } + + fn setup_balances_and_pool(asset_id: u32, account: AccountId) { + use frame_support::{assert_ok, traits::fungibles::Mutate}; + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + asset_id.into(), + account.clone().into(), /* owner */ + true, /* is_sufficient */ + 1, + )); + + let lp_provider = account.clone(); + let _ = Balances::deposit_creating(&lp_provider, ((u64::MAX as u128) * 100).into()); + assert_ok!(Assets::mint_into( + asset_id.into(), + &lp_provider, + ((u64::MAX as u128) * 100).into() + )); + + let token_native = Box::new(NativeOrWithId::Native); + let token_second = Box::new(NativeOrWithId::WithId(asset_id)); + + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(lp_provider.clone()), + token_native.clone(), + token_second.clone() + )); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(lp_provider.clone()), + token_native, + token_second, + u64::MAX.into(), // 1 desired + u64::MAX.into(), // 2 desired + 1, // 1 min + 1, // 2 min + lp_provider, + )); + } +} + #[cfg(feature = "runtime-benchmarks")] mod benches { frame_benchmarking::define_benchmarks!( @@ -2359,6 +2495,9 @@ mod benches { [tasks_example, TasksExample] [pallet_democracy, Democracy] [pallet_asset_conversion, AssetConversion] + [pallet_asset_conversion_tx_payment, AssetConversionTxPayment] + [pallet_asset_tx_payment, AssetTxPayment] + [pallet_transaction_payment, TransactionPayment] [pallet_election_provider_multi_phase, ElectionProviderMultiPhase] [pallet_election_provider_support_benchmarking, EPSBench::] [pallet_elections_phragmen, Elections] @@ -2372,6 +2511,7 @@ mod benches { [pallet_lottery, Lottery] [pallet_membership, TechnicalMembership] [pallet_message_queue, MessageQueue] + [pallet_migrations, MultiBlockMigrations] [pallet_mmr, Mmr] [pallet_multisig, Multisig] [pallet_nomination_pools, 
NominationPoolsBench::] @@ -2391,6 +2531,7 @@ mod benches { [pallet_state_trie_migration, StateTrieMigration] [pallet_sudo, Sudo] [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_timestamp, Timestamp] [pallet_tips, Tips] [pallet_transaction_storage, TransactionStorage] @@ -2417,7 +2558,7 @@ impl_runtime_apis! { Executive::execute_block(block); } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -2938,6 +3079,7 @@ impl_runtime_apis! { use pallet_offences_benchmarking::Pallet as OffencesBench; use pallet_election_provider_support_benchmarking::Pallet as EPSBench; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use baseline::Pallet as BaselineBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; @@ -2962,6 +3104,7 @@ impl_runtime_apis! { use pallet_offences_benchmarking::Pallet as OffencesBench; use pallet_election_provider_support_benchmarking::Pallet as EPSBench; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use baseline::Pallet as BaselineBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; diff --git a/substrate/bin/node/testing/src/bench.rs b/substrate/bin/node/testing/src/bench.rs index df302a6453b9ffbef603afc36fc860a1535a7ca2..ccf79eed8847d8a2acdb4b5f96c15d021f750b65 100644 --- a/substrate/bin/node/testing/src/bench.rs +++ b/substrate/bin/node/testing/src/bench.rs @@ -51,6 +51,7 @@ use sp_core::{ed25519, sr25519, traits::SpawnNamed, Pair, Public}; use sp_crypto_hashing::blake2_256; use sp_inherents::InherentData; use sp_runtime::{ + generic::{ExtrinsicFormat, Preamble}, traits::{Block as BlockT, IdentifyAccount, Verify}, OpaqueExtrinsic, }; @@ -295,10 +296,10 @@ impl<'a> Iterator for BlockContentIterator<'a> { let signed = self.keyring.sign( CheckedExtrinsic { - signed: Some(( + format: ExtrinsicFormat::Signed( sender, - signed_extra(0, kitchensink_runtime::ExistentialDeposit::get() + 1), - )), + tx_ext(0, kitchensink_runtime::ExistentialDeposit::get() + 1), + ), function: match self.content.block_type { BlockType::RandomTransfersKeepAlive => RuntimeCall::Balances(BalancesCall::transfer_keep_alive { @@ -562,11 +563,11 @@ impl BenchKeyring { tx_version: u32, genesis_hash: [u8; 32], ) -> UncheckedExtrinsic { - match xt.signed { - Some((signed, extra)) => { + match xt.format { + ExtrinsicFormat::Signed(signed, tx_ext) => { let payload = ( xt.function, - extra.clone(), + tx_ext.clone(), spec_version, tx_version, genesis_hash, @@ -581,11 +582,20 @@ impl BenchKeyring { } }); UncheckedExtrinsic { - signature: Some((sp_runtime::MultiAddress::Id(signed), signature, extra)), + preamble: Preamble::Signed( + sp_runtime::MultiAddress::Id(signed), + signature, + tx_ext, + ), function: payload.0, } }, - None => UncheckedExtrinsic { signature: None, function: xt.function }, + ExtrinsicFormat::Bare => + UncheckedExtrinsic { preamble: Preamble::Bare, function: xt.function }, + ExtrinsicFormat::General(tx_ext) => UncheckedExtrinsic { + preamble: sp_runtime::generic::Preamble::General(tx_ext), + function: xt.function, + }, } } diff --git a/substrate/bin/node/testing/src/keyring.rs b/substrate/bin/node/testing/src/keyring.rs index f712191bed695031275cfb11c5e22c8fa2a26f78..13daf915325db370698e563e8a23140136e84a44 100644 --- 
a/substrate/bin/node/testing/src/keyring.rs +++ b/substrate/bin/node/testing/src/keyring.rs @@ -19,13 +19,13 @@ //! Test accounts. use codec::Encode; -use kitchensink_runtime::{CheckedExtrinsic, SessionKeys, SignedExtra, UncheckedExtrinsic}; +use kitchensink_runtime::{CheckedExtrinsic, SessionKeys, TxExtension, UncheckedExtrinsic}; use node_cli::chain_spec::get_from_seed; use node_primitives::{AccountId, Balance, Nonce}; use sp_core::{ecdsa, ed25519, sr25519}; use sp_crypto_hashing::blake2_256; use sp_keyring::AccountKeyring; -use sp_runtime::generic::Era; +use sp_runtime::generic::{Era, ExtrinsicFormat}; /// Alice's account id. pub fn alice() -> AccountId { @@ -70,15 +70,18 @@ pub fn session_keys_from_seed(seed: &str) -> SessionKeys { } /// Returns transaction extra. -pub fn signed_extra(nonce: Nonce, extra_fee: Balance) -> SignedExtra { +pub fn tx_ext(nonce: Nonce, extra_fee: Balance) -> TxExtension { ( - frame_system::CheckNonZeroSender::new(), - frame_system::CheckSpecVersion::new(), - frame_system::CheckTxVersion::new(), - frame_system::CheckGenesis::new(), - frame_system::CheckEra::from(Era::mortal(256, 0)), - frame_system::CheckNonce::from(nonce), - frame_system::CheckWeight::new(), + ( + frame_system::CheckNonZeroSender::new(), + frame_system::CheckSpecVersion::new(), + frame_system::CheckTxVersion::new(), + frame_system::CheckGenesis::new(), + frame_system::CheckEra::from(Era::mortal(256, 0)), + frame_system::CheckNonce::from(nonce), + frame_system::CheckWeight::new(), + ) + .into(), pallet_skip_feeless_payment::SkipCheckIfFeeless::from( pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(extra_fee, None), ), @@ -92,10 +95,10 @@ pub fn sign( tx_version: u32, genesis_hash: [u8; 32], ) -> UncheckedExtrinsic { - match xt.signed { - Some((signed, extra)) => { + match xt.format { + ExtrinsicFormat::Signed(signed, tx_ext) => { let payload = - (xt.function, extra.clone(), spec_version, tx_version, genesis_hash, genesis_hash); + (xt.function, tx_ext.clone(), spec_version, tx_version, genesis_hash, genesis_hash); let key = AccountKeyring::from_account_id(&signed).unwrap(); let signature = payload @@ -108,10 +111,21 @@ pub fn sign( }) .into(); UncheckedExtrinsic { - signature: Some((sp_runtime::MultiAddress::Id(signed), signature, extra)), + preamble: sp_runtime::generic::Preamble::Signed( + sp_runtime::MultiAddress::Id(signed), + signature, + tx_ext, + ), function: payload.0, } }, - None => UncheckedExtrinsic { signature: None, function: xt.function }, + ExtrinsicFormat::Bare => UncheckedExtrinsic { + preamble: sp_runtime::generic::Preamble::Bare, + function: xt.function, + }, + ExtrinsicFormat::General(tx_ext) => UncheckedExtrinsic { + preamble: sp_runtime::generic::Preamble::General(tx_ext), + function: xt.function, + }, } } diff --git a/substrate/client/api/src/client.rs b/substrate/client/api/src/client.rs index 46232c74539c6c230652a753b5fcd4b6761600d8..2de09840e4dfdc15e1f1d1acaa5f3e438de0caca 100644 --- a/substrate/client/api/src/client.rs +++ b/substrate/client/api/src/client.rs @@ -278,7 +278,7 @@ impl fmt::Display for UsageInfo { pub struct UnpinHandleInner { /// Hash of the block pinned by this handle hash: Block::Hash, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, } impl Debug for UnpinHandleInner { @@ -291,7 +291,7 @@ impl UnpinHandleInner { /// Create a new [`UnpinHandleInner`] pub fn new( hash: Block::Hash, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, ) -> Self { Self 
{ hash, unpin_worker_sender } } @@ -299,12 +299,25 @@ impl UnpinHandleInner { impl Drop for UnpinHandleInner { fn drop(&mut self) { - if let Err(err) = self.unpin_worker_sender.unbounded_send(self.hash) { + if let Err(err) = + self.unpin_worker_sender.unbounded_send(UnpinWorkerMessage::Unpin(self.hash)) + { log::debug!(target: "db", "Unable to unpin block with hash: {}, error: {:?}", self.hash, err); }; } } +/// Message that signals notification-based pinning actions to the pinning-worker. +/// +/// When the notification is dropped, an `Unpin` message should be sent to the worker. +#[derive(Debug)] +pub enum UnpinWorkerMessage { + /// Should be sent when an import or finality notification is created. + AnnouncePin(Block::Hash), + /// Should be sent when an import or finality notification is dropped. + Unpin(Block::Hash), +} + /// Keeps a specific block pinned while the handle is alive. /// Once the last handle instance for a given block is dropped, the /// block is unpinned in the [`Backend`](crate::backend::Backend::unpin_block). @@ -315,7 +328,7 @@ impl UnpinHandle { /// Create a new [`UnpinHandle`] pub fn new( hash: Block::Hash, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, ) -> UnpinHandle { UnpinHandle(Arc::new(UnpinHandleInner::new(hash, unpin_worker_sender))) } @@ -353,7 +366,7 @@ impl BlockImportNotification { header: Block::Header, is_new_best: bool, tree_route: Option>>, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, ) -> Self { Self { hash, @@ -412,7 +425,7 @@ impl FinalityNotification { /// Create finality notification from finality summary. pub fn from_summary( mut summary: FinalizeSummary, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, ) -> FinalityNotification { let hash = summary.finalized.pop().unwrap_or_default(); FinalityNotification { @@ -436,7 +449,7 @@ impl BlockImportNotification { /// Create finality notification from finality summary.
pub fn from_summary( summary: ImportSummary, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, ) -> BlockImportNotification { let hash = summary.hash; BlockImportNotification { diff --git a/substrate/client/api/src/notifications/tests.rs b/substrate/client/api/src/notifications/tests.rs index fba829b1cf9020034529b034f52015fb423608d8..7afdcbd95438f54a8a19eb03c29113288488eb54 100644 --- a/substrate/client/api/src/notifications/tests.rs +++ b/substrate/client/api/src/notifications/tests.rs @@ -18,7 +18,10 @@ use super::*; -use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, H256 as Hash}; +use sp_runtime::{ + generic::UncheckedExtrinsic, + testing::{Block as RawBlock, H256 as Hash}, +}; use std::iter::{empty, Empty}; type TestChangeSet = ( @@ -50,7 +53,7 @@ impl PartialEq for StorageChangeSet { } } -type Block = RawBlock>; +type Block = RawBlock>; #[test] fn triggering_change_should_notify_wildcard_listeners() { diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index fdc3d4918de32c9263286a0b3bfe73a01d26538b..932287ac86577b0e1a9b2c057320aeda9d12319a 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -38,7 +38,7 @@ use sp_core::traits::SpawnNamed; use sp_inherents::InherentData; use sp_runtime::{ traits::{BlakeTwo256, Block as BlockT, Hash as HashT, Header as HeaderT}, - Digest, Percent, SaturatedConversion, + Digest, ExtrinsicInclusionMode, Percent, SaturatedConversion, }; use std::{marker::PhantomData, pin::Pin, sync::Arc, time}; @@ -335,11 +335,12 @@ where self.apply_inherents(&mut block_builder, inherent_data)?; - // TODO call `after_inherents` and check if we should apply extrinsincs here - // - - let end_reason = - self.apply_extrinsics(&mut block_builder, deadline, block_size_limit).await?; + let mode = block_builder.extrinsic_inclusion_mode(); + let end_reason = match mode { + ExtrinsicInclusionMode::AllExtrinsics => + self.apply_extrinsics(&mut block_builder, deadline, block_size_limit).await?, + ExtrinsicInclusionMode::OnlyInherents => EndProposingReason::TransactionForbidden, + }; let (block, storage_changes, proof) = block_builder.build()?.into_inner(); let block_took = block_timer.elapsed(); diff --git a/substrate/client/block-builder/src/lib.rs b/substrate/client/block-builder/src/lib.rs index 258e39d962b2de2397d85a54223c155e821ddfa3..2f22cd42591fc60bf1665d00a1c5b2b548ad3aea 100644 --- a/substrate/client/block-builder/src/lib.rs +++ b/substrate/client/block-builder/src/lib.rs @@ -37,7 +37,7 @@ use sp_core::traits::CallContext; use sp_runtime::{ legacy, traits::{Block as BlockT, Hash, HashingFor, Header as HeaderT, NumberFor, One}, - Digest, + Digest, ExtrinsicInclusionMode, }; use std::marker::PhantomData; @@ -198,10 +198,12 @@ pub struct BlockBuilder<'a, Block: BlockT, C: ProvideRuntimeApi + 'a> { extrinsics: Vec, api: ApiRef<'a, C::Api>, call_api_at: &'a C, + /// Version of the [`BlockBuilderApi`] runtime API. version: u32, parent_hash: Block::Hash, /// The estimated size of the block header. estimated_header_size: usize, + extrinsic_inclusion_mode: ExtrinsicInclusionMode, } impl<'a, Block, C> BlockBuilder<'a, Block, C> @@ -244,9 +246,19 @@ where api.set_call_context(CallContext::Onchain); - api.initialize_block(parent_hash, &header)?; + let core_version = api + .api_version::>(parent_hash)? 
+ .ok_or_else(|| Error::VersionInvalid("Core".to_string()))?; - let version = api + let extrinsic_inclusion_mode = if core_version >= 5 { + api.initialize_block(parent_hash, &header)? + } else { + #[allow(deprecated)] + api.initialize_block_before_version_5(parent_hash, &header)?; + ExtrinsicInclusionMode::AllExtrinsics + }; + + let bb_version = api .api_version::>(parent_hash)? .ok_or_else(|| Error::VersionInvalid("BlockBuilderApi".to_string()))?; @@ -254,12 +266,18 @@ where parent_hash, extrinsics: Vec::new(), api, - version, + version: bb_version, estimated_header_size, call_api_at, + extrinsic_inclusion_mode, }) } + /// The extrinsic inclusion mode of the runtime for this block. + pub fn extrinsic_inclusion_mode(&self) -> ExtrinsicInclusionMode { + self.extrinsic_inclusion_mode + } + /// Push onto the block's list of extrinsics. /// /// This will ensure the extrinsic can be validly executed (by executing it). diff --git a/substrate/client/chain-spec/src/genesis_config_builder.rs b/substrate/client/chain-spec/src/genesis_config_builder.rs index 6b956316203cc1279485fa1c5ad39cc0885cc14d..c8b54f66be6f52ffb6900fafd2e8ec1652030976 100644 --- a/substrate/client/chain-spec/src/genesis_config_builder.rs +++ b/substrate/client/chain-spec/src/genesis_config_builder.rs @@ -81,7 +81,8 @@ where .0 } - /// Returns the default `GenesisConfig` provided by the `runtime`. + /// Returns a json representation of the default `RuntimeGenesisConfig` provided by the + /// `runtime`. /// /// Calls [`GenesisBuilder::create_default_config`](sp_genesis_builder::GenesisBuilder::create_default_config) in the `runtime`. pub fn get_default_config(&self) -> core::result::Result { @@ -94,7 +95,7 @@ where Ok(from_slice(&default_config[..]).expect("returned value is json. qed.")) } - /// Build the given `GenesisConfig` and returns the genesis state. + /// Builds `RuntimeGenesisConfig` from given json blob and returns the genesis state. /// /// Calls [`GenesisBuilder::build_config`](sp_genesis_builder::GenesisBuilder::build_config) /// provided by the `runtime`. @@ -111,25 +112,26 @@ where Ok(ext.into_storages()) } - /// Creates the genesis state by patching the default `GenesisConfig` and applying it. + /// Creates the genesis state by patching the default `RuntimeGenesisConfig`. /// - /// This function generates the `GenesisConfig` for the runtime by applying a provided JSON - /// patch. The patch modifies the default `GenesisConfig` allowing customization of the specific - /// keys. The resulting `GenesisConfig` is then deserialized from the patched JSON - /// representation and stored in the storage. + /// This function generates the `RuntimeGenesisConfig` for the runtime by applying a provided + /// JSON patch. The patch modifies the default `RuntimeGenesisConfig` allowing customization of + /// the specific keys. The resulting `RuntimeGenesisConfig` is then deserialized from the + /// patched JSON representation and stored in the storage. /// /// If the provided JSON patch is incorrect or the deserialization fails the error will be /// returned. /// - /// The patching process modifies the default `GenesisConfig` according to the following rules: + /// The patching process modifies the default `RuntimeGenesisConfig` according to the following + /// rules: /// 1. Existing keys in the default configuration will be overridden by the corresponding values /// in the patch. /// 2. If a key exists in the patch but not in the default configuration, it will be added to - /// the resulting `GenesisConfig`. 
+ /// the resulting `RuntimeGenesisConfig`. /// 3. Keys in the default configuration that have null values in the patch will be removed from - /// the resulting `GenesisConfig`. This is helpful for changing enum variant value. + /// the resulting `RuntimeGenesisConfig`. This is helpful for changing enum variant value. /// - /// Please note that the patch may contain full `GenesisConfig`. + /// Please note that the patch may contain full `RuntimeGenesisConfig`. pub fn get_storage_for_patch(&self, patch: Value) -> core::result::Result { let mut config = self.get_default_config()?; crate::json_patch::merge(&mut config, patch); diff --git a/substrate/client/db/benches/state_access.rs b/substrate/client/db/benches/state_access.rs index e47559e710df1e21dab3d2a57b0c6434dd790289..9f3b8ca77c25e50986de42c5803f8615bbf3d00f 100644 --- a/substrate/client/db/benches/state_access.rs +++ b/substrate/client/db/benches/state_access.rs @@ -22,12 +22,13 @@ use sc_client_api::{Backend as _, BlockImportOperation, NewBlockState, StateBack use sc_client_db::{Backend, BlocksPruning, DatabaseSettings, DatabaseSource, PruningMode}; use sp_core::H256; use sp_runtime::{ - testing::{Block as RawBlock, ExtrinsicWrapper, Header}, + generic::UncheckedExtrinsic, + testing::{Block as RawBlock, Header, MockCallU64}, StateVersion, Storage, }; use tempfile::TempDir; -pub(crate) type Block = RawBlock>; +pub(crate) type Block = RawBlock>; fn insert_blocks(db: &Backend, storage: Vec<(Vec, Vec)>) -> H256 { let mut op = db.begin_operation().unwrap(); diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs index 870b4150b9df71ff63e5b5be7467b5369caa97c0..f22022ef29a37695e2df1d432dd3b4d4d916c62e 100644 --- a/substrate/client/db/src/lib.rs +++ b/substrate/client/db/src/lib.rs @@ -2531,7 +2531,7 @@ impl sc_client_api::backend::Backend for Backend { self.storage.state_db.pin(&hash, number.saturated_into::(), hint).map_err( |_| { sp_blockchain::Error::UnknownBlock(format!( - "State already discarded for `{:?}`", + "Unable to pin: state already discarded for `{:?}`", hash )) }, @@ -2573,7 +2573,8 @@ pub(crate) mod tests { use sp_blockchain::{lowest_common_ancestor, tree_route}; use sp_core::H256; use sp_runtime::{ - testing::{Block as RawBlock, ExtrinsicWrapper, Header}, + generic::UncheckedExtrinsic, + testing::{Block as RawBlock, Header, MockCallU64}, traits::{BlakeTwo256, Hash}, ConsensusEngineId, StateVersion, }; @@ -2581,7 +2582,8 @@ pub(crate) mod tests { const CONS0_ENGINE_ID: ConsensusEngineId = *b"CON0"; const CONS1_ENGINE_ID: ConsensusEngineId = *b"CON1"; - pub(crate) type Block = RawBlock>; + type UncheckedXt = UncheckedExtrinsic; + pub(crate) type Block = RawBlock; pub fn insert_header( backend: &Backend, @@ -2600,7 +2602,7 @@ pub(crate) mod tests { parent_hash: H256, _changes: Option, Vec)>>, extrinsics_root: H256, - body: Vec>, + body: Vec, transaction_index: Option>, ) -> Result { use sp_runtime::testing::Digest; @@ -3414,7 +3416,7 @@ pub(crate) mod tests { prev_hash, None, Default::default(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], None, ) .unwrap(); @@ -3436,11 +3438,20 @@ pub(crate) mod tests { assert_eq!(None, bc.body(blocks[0]).unwrap()); assert_eq!(None, bc.body(blocks[1]).unwrap()); assert_eq!(None, bc.body(blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(3.into(), ())]), + bc.body(blocks[3]).unwrap() + ); + 
assert_eq!( + Some(vec![UncheckedXt::new_transaction(4.into(), ())]), + bc.body(blocks[4]).unwrap() + ); } else { for i in 0..5 { - assert_eq!(Some(vec![(i as u64).into()]), bc.body(blocks[i]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction((i as u64).into(), ())]), + bc.body(blocks[i]).unwrap() + ); } } } @@ -3464,7 +3475,7 @@ pub(crate) mod tests { prev_hash, None, Default::default(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], None, ) .unwrap(); @@ -3473,16 +3484,26 @@ pub(crate) mod tests { } // insert a fork at block 2 - let fork_hash_root = - insert_block(&backend, 2, blocks[1], None, H256::random(), vec![2.into()], None) - .unwrap(); + let fork_hash_root = insert_block( + &backend, + 2, + blocks[1], + None, + H256::random(), + vec![UncheckedXt::new_transaction(2.into(), ())], + None, + ) + .unwrap(); insert_block( &backend, 3, fork_hash_root, None, H256::random(), - vec![3.into(), 11.into()], + vec![ + UncheckedXt::new_transaction(3.into(), ()), + UncheckedXt::new_transaction(11.into(), ()), + ], None, ) .unwrap(); @@ -3492,7 +3513,10 @@ pub(crate) mod tests { backend.commit_operation(op).unwrap(); let bc = backend.blockchain(); - assert_eq!(Some(vec![2.into()]), bc.body(fork_hash_root).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(2.into(), ())]), + bc.body(fork_hash_root).unwrap() + ); for i in 1..5 { let mut op = backend.begin_operation().unwrap(); @@ -3506,16 +3530,28 @@ pub(crate) mod tests { assert_eq!(None, bc.body(blocks[1]).unwrap()); assert_eq!(None, bc.body(blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(3.into(), ())]), + bc.body(blocks[3]).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(4.into(), ())]), + bc.body(blocks[4]).unwrap() + ); } else { for i in 0..5 { - assert_eq!(Some(vec![(i as u64).into()]), bc.body(blocks[i]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction((i as u64).into(), ())]), + bc.body(blocks[i]).unwrap() + ); } } if matches!(pruning, BlocksPruning::KeepAll) { - assert_eq!(Some(vec![2.into()]), bc.body(fork_hash_root).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(2.into(), ())]), + bc.body(fork_hash_root).unwrap() + ); } else { assert_eq!(None, bc.body(fork_hash_root).unwrap()); } @@ -3536,8 +3572,16 @@ pub(crate) mod tests { let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(10), 10); let make_block = |index, parent, val: u64| { - insert_block(&backend, index, parent, None, H256::random(), vec![val.into()], None) - .unwrap() + insert_block( + &backend, + index, + parent, + None, + H256::random(), + vec![UncheckedXt::new_transaction(val.into(), ())], + None, + ) + .unwrap() }; let block_0 = make_block(0, Default::default(), 0x00); @@ -3565,18 +3609,30 @@ pub(crate) mod tests { let bc = backend.blockchain(); assert_eq!(None, bc.body(block_1b).unwrap()); assert_eq!(None, bc.body(block_2b).unwrap()); - assert_eq!(Some(vec![0x00.into()]), bc.body(block_0).unwrap()); - assert_eq!(Some(vec![0x1a.into()]), bc.body(block_1a).unwrap()); - assert_eq!(Some(vec![0x2a.into()]), bc.body(block_2a).unwrap()); - assert_eq!(Some(vec![0x3a.into()]), bc.body(block_3a).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(0x00.into(), ())]), + bc.body(block_0).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(0x1a.into(), ())]), + 
bc.body(block_1a).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(0x2a.into(), ())]), + bc.body(block_2a).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(0x3a.into(), ())]), + bc.body(block_3a).unwrap() + ); } #[test] fn indexed_data_block_body() { let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(1), 10); - let x0 = ExtrinsicWrapper::from(0u64).encode(); - let x1 = ExtrinsicWrapper::from(1u64).encode(); + let x0 = UncheckedXt::new_transaction(0.into(), ()).encode(); + let x1 = UncheckedXt::new_transaction(1.into(), ()).encode(); let x0_hash = as sp_core::Hasher>::hash(&x0[1..]); let x1_hash = as sp_core::Hasher>::hash(&x1[1..]); let index = vec![ @@ -3597,7 +3653,10 @@ pub(crate) mod tests { Default::default(), None, Default::default(), - vec![0u64.into(), 1u64.into()], + vec![ + UncheckedXt::new_transaction(0.into(), ()), + UncheckedXt::new_transaction(1.into(), ()), + ], Some(index), ) .unwrap(); @@ -3619,8 +3678,9 @@ pub(crate) mod tests { fn index_invalid_size() { let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(1), 10); - let x0 = ExtrinsicWrapper::from(0u64).encode(); - let x1 = ExtrinsicWrapper::from(1u64).encode(); + let x0 = UncheckedXt::new_transaction(0.into(), ()).encode(); + let x1 = UncheckedXt::new_transaction(1.into(), ()).encode(); + let x0_hash = as sp_core::Hasher>::hash(&x0[..]); let x1_hash = as sp_core::Hasher>::hash(&x1[..]); let index = vec![ @@ -3641,7 +3701,10 @@ pub(crate) mod tests { Default::default(), None, Default::default(), - vec![0u64.into(), 1u64.into()], + vec![ + UncheckedXt::new_transaction(0.into(), ()), + UncheckedXt::new_transaction(1.into(), ()), + ], Some(index), ) .unwrap(); @@ -3655,7 +3718,7 @@ pub(crate) mod tests { let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 10); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); - let x1 = ExtrinsicWrapper::from(0u64).encode(); + let x1 = UncheckedXt::new_transaction(0.into(), ()).encode(); let x1_hash = as sp_core::Hasher>::hash(&x1[1..]); for i in 0..10 { let mut index = Vec::new(); @@ -3675,7 +3738,7 @@ pub(crate) mod tests { prev_hash, None, Default::default(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], Some(index), ) .unwrap(); @@ -3709,7 +3772,7 @@ pub(crate) mod tests { prev_hash, None, Default::default(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], None, ) .unwrap(); @@ -3724,7 +3787,7 @@ pub(crate) mod tests { blocks[1], None, sp_core::H256::random(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], None, ) .unwrap(); @@ -3738,7 +3801,7 @@ pub(crate) mod tests { blocks[0], None, sp_core::H256::random(), - vec![42.into()], + vec![UncheckedXt::new_transaction(42.into(), ())], None, ) .unwrap(); @@ -4211,7 +4274,7 @@ pub(crate) mod tests { prev_hash, None, Default::default(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], None, ) .unwrap(); @@ -4226,7 +4289,10 @@ pub(crate) mod tests { // Check that we can properly access values when there is reference count // but no value. - assert_eq!(Some(vec![1.into()]), bc.body(blocks[1]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(1.into(), ())]), + bc.body(blocks[1]).unwrap() + ); // Block 1 gets pinned three times backend.pin_block(blocks[1]).unwrap(); @@ -4243,27 +4309,42 @@ pub(crate) mod tests { // Block 0, 1, 2, 3 are pinned, so all values should be cached. 
// Block 4 is inside the pruning window, its value is in db. - assert_eq!(Some(vec![0.into()]), bc.body(blocks[0]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(0.into(), ())]), + bc.body(blocks[0]).unwrap() + ); - assert_eq!(Some(vec![1.into()]), bc.body(blocks[1]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(1.into(), ())]), + bc.body(blocks[1]).unwrap() + ); assert_eq!( Some(Justifications::from(build_justification(1))), bc.justifications(blocks[1]).unwrap() ); - assert_eq!(Some(vec![2.into()]), bc.body(blocks[2]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(2.into(), ())]), + bc.body(blocks[2]).unwrap() + ); assert_eq!( Some(Justifications::from(build_justification(2))), bc.justifications(blocks[2]).unwrap() ); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(3.into(), ())]), + bc.body(blocks[3]).unwrap() + ); assert_eq!( Some(Justifications::from(build_justification(3))), bc.justifications(blocks[3]).unwrap() ); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(4.into(), ())]), + bc.body(blocks[4]).unwrap() + ); assert_eq!( Some(Justifications::from(build_justification(4))), bc.justifications(blocks[4]).unwrap() @@ -4294,7 +4375,10 @@ pub(crate) mod tests { assert!(bc.justifications(blocks[1]).unwrap().is_none()); // Block 4 is inside the pruning window and still kept - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(4.into(), ())]), + bc.body(blocks[4]).unwrap() + ); assert_eq!( Some(Justifications::from(build_justification(4))), bc.justifications(blocks[4]).unwrap() @@ -4302,9 +4386,16 @@ pub(crate) mod tests { // Block tree: // 0 -> 1 -> 2 -> 3 -> 4 -> 5 - let hash = - insert_block(&backend, 5, prev_hash, None, Default::default(), vec![5.into()], None) - .unwrap(); + let hash = insert_block( + &backend, + 5, + prev_hash, + None, + Default::default(), + vec![UncheckedXt::new_transaction(5.into(), ())], + None, + ) + .unwrap(); blocks.push(hash); backend.pin_block(blocks[4]).unwrap(); @@ -4319,12 +4410,18 @@ pub(crate) mod tests { assert!(bc.body(blocks[2]).unwrap().is_none()); assert!(bc.body(blocks[3]).unwrap().is_none()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(4.into(), ())]), + bc.body(blocks[4]).unwrap() + ); assert_eq!( Some(Justifications::from(build_justification(4))), bc.justifications(blocks[4]).unwrap() ); - assert_eq!(Some(vec![5.into()]), bc.body(blocks[5]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(5.into(), ())]), + bc.body(blocks[5]).unwrap() + ); assert!(bc.header(blocks[5]).ok().flatten().is_some()); backend.unpin_block(blocks[4]); @@ -4334,9 +4431,16 @@ pub(crate) mod tests { // Append a justification to block 5. 
backend.append_justification(blocks[5], ([0, 0, 0, 1], vec![42])).unwrap(); - let hash = - insert_block(&backend, 6, blocks[5], None, Default::default(), vec![6.into()], None) - .unwrap(); + let hash = insert_block( + &backend, + 6, + blocks[5], + None, + Default::default(), + vec![UncheckedXt::new_transaction(6.into(), ())], + None, + ) + .unwrap(); blocks.push(hash); // Pin block 5 so it gets loaded into the cache on prune @@ -4349,7 +4453,10 @@ pub(crate) mod tests { op.mark_finalized(blocks[6], None).unwrap(); backend.commit_operation(op).unwrap(); - assert_eq!(Some(vec![5.into()]), bc.body(blocks[5]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(5.into(), ())]), + bc.body(blocks[5]).unwrap() + ); assert!(bc.header(blocks[5]).ok().flatten().is_some()); let mut expected = Justifications::from(build_justification(5)); expected.append(([0, 0, 0, 1], vec![42])); @@ -4371,7 +4478,7 @@ pub(crate) mod tests { prev_hash, None, Default::default(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], None, ) .unwrap(); @@ -4387,16 +4494,26 @@ pub(crate) mod tests { // Block tree: // 0 -> 1 -> 2 -> 3 -> 4 // \ -> 2 -> 3 - let fork_hash_root = - insert_block(&backend, 2, blocks[1], None, H256::random(), vec![2.into()], None) - .unwrap(); + let fork_hash_root = insert_block( + &backend, + 2, + blocks[1], + None, + H256::random(), + vec![UncheckedXt::new_transaction(2.into(), ())], + None, + ) + .unwrap(); let fork_hash_3 = insert_block( &backend, 3, fork_hash_root, None, H256::random(), - vec![3.into(), 11.into()], + vec![ + UncheckedXt::new_transaction(3.into(), ()), + UncheckedXt::new_transaction(11.into(), ()), + ], None, ) .unwrap(); @@ -4417,14 +4534,35 @@ pub(crate) mod tests { } let bc = backend.blockchain(); - assert_eq!(Some(vec![0.into()]), bc.body(blocks[0]).unwrap()); - assert_eq!(Some(vec![1.into()]), bc.body(blocks[1]).unwrap()); - assert_eq!(Some(vec![2.into()]), bc.body(blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(0.into(), ())]), + bc.body(blocks[0]).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(1.into(), ())]), + bc.body(blocks[1]).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(2.into(), ())]), + bc.body(blocks[2]).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(3.into(), ())]), + bc.body(blocks[3]).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(4.into(), ())]), + bc.body(blocks[4]).unwrap() + ); // Check the fork hashes. assert_eq!(None, bc.body(fork_hash_root).unwrap()); - assert_eq!(Some(vec![3.into(), 11.into()]), bc.body(fork_hash_3).unwrap()); + assert_eq!( + Some(vec![ + UncheckedXt::new_transaction(3.into(), ()), + UncheckedXt::new_transaction(11.into(), ()) + ]), + bc.body(fork_hash_3).unwrap() + ); // Unpin all blocks, except the forked one. 
for block in &blocks { diff --git a/substrate/client/db/src/pinned_blocks_cache.rs b/substrate/client/db/src/pinned_blocks_cache.rs index 46c9287fb19ac1361153dbb65f4f0b353170042f..ac4aad07765cfbaa4d9c66efa4b44730b0b0a04c 100644 --- a/substrate/client/db/src/pinned_blocks_cache.rs +++ b/substrate/client/db/src/pinned_blocks_cache.rs @@ -20,7 +20,7 @@ use schnellru::{Limiter, LruMap}; use sp_runtime::{traits::Block as BlockT, Justifications}; const LOG_TARGET: &str = "db::pin"; -const PINNING_CACHE_SIZE: usize = 1024; +const PINNING_CACHE_SIZE: usize = 2048; /// Entry for pinned blocks cache. struct PinnedBlockCacheEntry { diff --git a/substrate/client/db/src/utils.rs b/substrate/client/db/src/utils.rs index abf9c4629cee47e31d18a18c827212b48fe31511..d2a5f7e718a6f0af4188fc18ed0c551a51f5c974 100644 --- a/substrate/client/db/src/utils.rs +++ b/substrate/client/db/src/utils.rs @@ -582,14 +582,19 @@ impl<'a, 'b> codec::Input for JoinInput<'a, 'b> { mod tests { use super::*; use codec::Input; - use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; - type Block = RawBlock>; + use sp_runtime::{ + generic::UncheckedExtrinsic, + testing::{Block as RawBlock, MockCallU64}, + }; + + pub type UncheckedXt = UncheckedExtrinsic; + type Block = RawBlock; #[cfg(feature = "rocksdb")] #[test] fn database_type_subdir_migration() { use std::path::PathBuf; - type Block = RawBlock>; + type Block = RawBlock; fn check_dir_for_db_type( db_type: DatabaseType, diff --git a/substrate/client/executor/src/executor.rs b/substrate/client/executor/src/executor.rs index 499bb704b16990de49d2b3311c48fa8ee2c1813e..d56a3b389ef42868d0543303c5fbd63ea8d2d884 100644 --- a/substrate/client/executor/src/executor.rs +++ b/substrate/client/executor/src/executor.rs @@ -518,7 +518,7 @@ where runtime_code, ext, heap_alloc_strategy, - |_, mut instance, _onchain_version, mut ext| { + |_, mut instance, _on_chain_version, mut ext| { with_externalities_safe(&mut **ext, move || instance.call_export(method, data)) }, ); @@ -682,18 +682,18 @@ impl CodeExecutor for NativeElseWasmExecut runtime_code, ext, heap_alloc_strategy, - |_, mut instance, onchain_version, mut ext| { - let onchain_version = - onchain_version.ok_or_else(|| Error::ApiError("Unknown version".into()))?; + |_, mut instance, on_chain_version, mut ext| { + let on_chain_version = + on_chain_version.ok_or_else(|| Error::ApiError("Unknown version".into()))?; let can_call_with = - onchain_version.can_call_with(&self.native_version.runtime_version); + on_chain_version.can_call_with(&self.native_version.runtime_version); if use_native && can_call_with { tracing::trace!( target: "executor", native = %self.native_version.runtime_version, - chain = %onchain_version, + chain = %on_chain_version, "Request for native execution succeeded", ); @@ -705,7 +705,7 @@ impl CodeExecutor for NativeElseWasmExecut tracing::trace!( target: "executor", native = %self.native_version.runtime_version, - chain = %onchain_version, + chain = %on_chain_version, "Request for native execution failed", ); } diff --git a/substrate/client/network-gossip/src/state_machine.rs b/substrate/client/network-gossip/src/state_machine.rs index 069d7cdba16599b4b4da0965a5d8e4588478d633..f1c830341ea7337df312e9181ef0ba7caafdc2ce 100644 --- a/substrate/client/network-gossip/src/state_machine.rs +++ b/substrate/client/network-gossip/src/state_machine.rs @@ -550,7 +550,8 @@ mod tests { NotificationSenderError, NotificationSenderT as NotificationSender, ReputationChange, }; use sp_runtime::{ - testing::{Block as RawBlock, 
ExtrinsicWrapper, H256}, + generic::UncheckedExtrinsic, + testing::{Block as RawBlock, MockCallU64, H256}, traits::NumberFor, }; use std::{ @@ -559,7 +560,7 @@ mod tests { sync::{Arc, Mutex}, }; - type Block = RawBlock>; + type Block = RawBlock>; macro_rules! push_msg { ($consensus:expr, $topic:expr, $hash: expr, $m:expr) => { diff --git a/substrate/client/network/sync/src/blocks.rs b/substrate/client/network/sync/src/blocks.rs index 4988045a4786720771ad84d9bd3b3cb0aa96592a..a115ee94767454c14755517be82609dd22409cf4 100644 --- a/substrate/client/network/sync/src/blocks.rs +++ b/substrate/client/network/sync/src/blocks.rs @@ -265,9 +265,12 @@ mod test { use libp2p::PeerId; use sc_network_common::sync::message; use sp_core::H256; - use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; + use sp_runtime::{ + generic::UncheckedExtrinsic, + testing::{Block as RawBlock, MockCallU64}, + }; - type Block = RawBlock>; + type Block = RawBlock>; fn is_empty(bc: &BlockCollection) -> bool { bc.blocks.is_empty() && bc.peer_requests.is_empty() diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs index c1a7009eeb01745ca5a7069eb2a258768ad2c49c..24640fdc45576191f30d190a39ee8186d27520c8 100644 --- a/substrate/client/network/sync/src/engine.rs +++ b/substrate/client/network/sync/src/engine.rs @@ -656,6 +656,8 @@ where Some(event) => self.process_notification_event(event), None => return, }, + // TODO: setting of warp sync target block should be moved to the initialization of + // `SyncingEngine`, see https://github.com/paritytech/polkadot-sdk/issues/3537. warp_target_block_header = &mut self.warp_sync_target_block_header_rx_fused => { if let Err(_) = self.pass_warp_sync_target_block_header(warp_target_block_header) { error!( diff --git a/substrate/client/network/sync/src/lib.rs b/substrate/client/network/sync/src/lib.rs index e23a23e735d3e25b60b6b5bda8dd4beadcfe9377..9f6c0f45d089ce415f9e8c0ae792de1d7a327407 100644 --- a/substrate/client/network/sync/src/lib.rs +++ b/substrate/client/network/sync/src/lib.rs @@ -28,7 +28,7 @@ mod justification_requests; mod pending_responses; mod request_metrics; mod schema; -mod types; +pub mod types; pub mod block_relay_protocol; pub mod block_request_handler; diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index 7d6e6a8d3b8b79e9e926261d298e4fcacf7c714e..dabcf37ae6321a4501095a4ba3140c1ccf424b74 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -30,7 +30,7 @@ use crate::{ }; use chain_sync::{ChainSync, ChainSyncAction, ChainSyncMode}; use libp2p::PeerId; -use log::{debug, error, info}; +use log::{debug, error, info, warn}; use prometheus_endpoint::Registry; use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; @@ -159,7 +159,7 @@ impl From> for SyncingAction { /// Proxy to specific syncing strategies. pub struct SyncingStrategy { - /// Syncing configuration. + /// Initial syncing configuration. config: SyncingConfig, /// Client used by syncing strategies. 
client: Arc, @@ -466,15 +466,27 @@ where &mut self, target_header: B::Header, ) -> Result<(), ()> { - match self.warp { - Some(ref mut warp) => { - warp.set_target_block(target_header); - Ok(()) + match self.config.mode { + SyncMode::Warp => match self.warp { + Some(ref mut warp) => { + warp.set_target_block(target_header); + Ok(()) + }, + None => { + // As mode is set to warp sync, but no warp sync strategy is active, this means + // that warp sync has already finished / was skipped. + warn!( + target: LOG_TARGET, + "Discarding warp sync target, as warp sync was seemingly skipped due \ + to node being (partially) synced.", + ); + Ok(()) + }, }, - None => { + _ => { error!( target: LOG_TARGET, - "Cannot set warp sync target block: no warp sync strategy is active." + "Cannot set warp sync target block: not in warp sync mode." ); debug_assert!(false); Err(()) @@ -587,3 +599,63 @@ where } } } + +#[cfg(test)] +mod test { + use super::*; + use futures::executor::block_on; + use sc_block_builder::BlockBuilderBuilder; + use substrate_test_runtime_client::{ + ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, TestClientBuilder, + TestClientBuilderExt, + }; + + /// Regression test for crash when starting already synced parachain node with `--sync=warp`. + /// We must remove this after setting of warp sync target block is moved to initialization of + /// `SyncingEngine` (issue https://github.com/paritytech/polkadot-sdk/issues/3537). + #[test] + fn set_target_block_finished_warp_sync() { + // Populate database with finalized state. + let mut client = Arc::new(TestClientBuilder::new().build()); + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + let just = (*b"TEST", Vec::new()); + client.finalize_block(block.hash(), Some(just)).unwrap(); + let target_block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap() + .build() + .unwrap() + .block; + + // Initialize syncing strategy. + let config = SyncingConfig { + mode: SyncMode::Warp, + max_parallel_downloads: 3, + max_blocks_per_request: 64, + metrics_registry: None, + }; + let mut strategy = + SyncingStrategy::new(config, client, Some(WarpSyncConfig::WaitForTarget)).unwrap(); + + // Warp sync instantly finishes as we have finalized state in DB. + let actions = strategy.actions().unwrap(); + assert_eq!(actions.len(), 1); + assert!(matches!(actions[0], SyncingAction::Finished)); + assert!(strategy.warp.is_none()); + + // Try setting the target block. We mustn't crash. + strategy + .set_warp_sync_target_block_header(target_block.header().clone()) + .unwrap(); + } +} diff --git a/substrate/client/proposer-metrics/src/lib.rs b/substrate/client/proposer-metrics/src/lib.rs index 012e8ca769a96cb51141c5a13ca7ba04d76a8123..2856300cf8027b45bb0ef32b8781c533ae2e340c 100644 --- a/substrate/client/proposer-metrics/src/lib.rs +++ b/substrate/client/proposer-metrics/src/lib.rs @@ -44,11 +44,14 @@ impl MetricsLink { } /// The reason why proposing a block ended. +#[derive(Clone, Copy, PartialEq, Eq)] pub enum EndProposingReason { NoMoreTransactions, HitDeadline, HitBlockSizeLimit, HitBlockWeightLimit, + /// No transactions are allowed in the block. + TransactionForbidden, } /// Authorship metrics. 
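The `strategy.rs` hunk earlier in this patch changes `set_warp_sync_target_block_header` so that a target arriving after warp sync has already finished is warned about and discarded rather than hitting the debug assertion. A minimal sketch of the resulting three-way decision, using stand-in types rather than the real `SyncingStrategy`, so only the control flow is taken from the patch:

    // Stand-in types; the real logic lives in `SyncingStrategy` above.
    enum SyncMode { Warp, Full }
    struct WarpSyncStrategy;

    fn set_warp_sync_target(mode: &SyncMode, warp: &mut Option<WarpSyncStrategy>) -> Result<(), ()> {
        match mode {
            SyncMode::Warp => match warp {
                // Warp sync still running: hand the target header to it.
                Some(_warp) => Ok(()),
                // Warp sync already finished because the node had a finalized
                // state: warn and discard the target instead of asserting.
                None => Ok(()),
            },
            // Not in warp mode: setting a warp target is a logic error.
            _ => Err(()),
        }
    }

The `None` arm is the case exercised by the new `set_target_block_finished_warp_sync` regression test above.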
@@ -112,6 +115,7 @@ impl Metrics { EndProposingReason::NoMoreTransactions => "no_more_transactions", EndProposingReason::HitBlockSizeLimit => "hit_block_size_limit", EndProposingReason::HitBlockWeightLimit => "hit_block_weight_limit", + EndProposingReason::TransactionForbidden => "transactions_forbidden", }; self.end_proposing_reason.with_label_values(&[reason]).inc(); diff --git a/substrate/client/rpc-servers/Cargo.toml b/substrate/client/rpc-servers/Cargo.toml index 7f7a86799e0cc6510fc4ab7f0d3e1c2c3070b85f..3adc81c57d594a355212a88f83682a0441bc7acc 100644 --- a/substrate/client/rpc-servers/Cargo.toml +++ b/substrate/client/rpc-servers/Cargo.toml @@ -26,5 +26,4 @@ tower = { version = "0.4.13", features = ["util"] } http = "0.2.8" hyper = "0.14.27" futures = "0.3.29" -pin-project = "1.1.3" governor = "0.6.0" diff --git a/substrate/client/rpc-servers/src/lib.rs b/substrate/client/rpc-servers/src/lib.rs index e65d954bed5266746f2c163e865613f255356c44..ad4b444c7ff425ea84ba25e756d61854c3a4569e 100644 --- a/substrate/client/rpc-servers/src/lib.rs +++ b/substrate/client/rpc-servers/src/lib.rs @@ -49,7 +49,7 @@ pub use jsonrpsee::{ }, server::{middleware::rpc::RpcServiceBuilder, BatchRequestConfig}, }; -pub use middleware::{MetricsLayer, RateLimitLayer, RpcMetrics}; +pub use middleware::{Metrics, MiddlewareLayer, RpcMetrics}; const MEGABYTE: u32 = 1024 * 1024; @@ -173,13 +173,22 @@ where let is_websocket = ws::is_upgrade_request(&req); let transport_label = if is_websocket { "ws" } else { "http" }; - let metrics = metrics.map(|m| MetricsLayer::new(m, transport_label)); - let rate_limit = rate_limit.map(|r| RateLimitLayer::per_minute(r)); + let middleware_layer = match (metrics, rate_limit) { + (None, None) => None, + (Some(metrics), None) => Some( + MiddlewareLayer::new().with_metrics(Metrics::new(metrics, transport_label)), + ), + (None, Some(rate_limit)) => + Some(MiddlewareLayer::new().with_rate_limit_per_minute(rate_limit)), + (Some(metrics), Some(rate_limit)) => Some( + MiddlewareLayer::new() + .with_metrics(Metrics::new(metrics, transport_label)) + .with_rate_limit_per_minute(rate_limit), + ), + }; - // NOTE: The metrics needs to run first to include rate-limited calls in the - // metrics. let rpc_middleware = - RpcServiceBuilder::new().option_layer(metrics.clone()).option_layer(rate_limit); + RpcServiceBuilder::new().option_layer(middleware_layer.clone()); let mut svc = service_builder.set_rpc_middleware(rpc_middleware).build(methods, stop_handle); @@ -191,9 +200,9 @@ where // Spawn a task to handle when the connection is closed. tokio_handle.spawn(async move { let now = std::time::Instant::now(); - metrics.as_ref().map(|m| m.ws_connect()); + middleware_layer.as_ref().map(|m| m.ws_connect()); on_disconnect.await; - metrics.as_ref().map(|m| m.ws_disconnect(now)); + middleware_layer.as_ref().map(|m| m.ws_disconnect(now)); }); } diff --git a/substrate/client/rpc-servers/src/middleware/metrics.rs b/substrate/client/rpc-servers/src/middleware/metrics.rs index c2d1956c3b3985e8e071ad5316dcc2a150410415..17849dc0c44846c10ed06b0f86d39783c5d29dc1 100644 --- a/substrate/client/rpc-servers/src/middleware/metrics.rs +++ b/substrate/client/rpc-servers/src/middleware/metrics.rs @@ -18,15 +18,9 @@ //! RPC middleware to collect prometheus metrics on RPC calls. 
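The `lib.rs` hunk above folds the former separate metrics and rate-limit layers into one optional `MiddlewareLayer` by matching on all four `(metrics, rate_limit)` combinations. The same wiring can also be expressed with a hypothetical helper; this is only a sketch of the builder API introduced later in this patch (`MiddlewareLayer`, `Metrics`, `RpcMetrics`), not a change to it:

    fn build_middleware_layer(
        metrics: Option<RpcMetrics>,
        rate_limit: Option<std::num::NonZeroU32>,
        transport_label: &'static str,
    ) -> Option<MiddlewareLayer> {
        // Nothing configured: install no middleware at all.
        if metrics.is_none() && rate_limit.is_none() {
            return None;
        }

        let mut layer = MiddlewareLayer::new();
        if let Some(metrics) = metrics {
            layer = layer.with_metrics(Metrics::new(metrics, transport_label));
        }
        if let Some(rate_limit) = rate_limit {
            layer = layer.with_rate_limit_per_minute(rate_limit);
        }
        Some(layer)
    }

Either form ends up in `RpcServiceBuilder::new().option_layer(...)` as shown in the hunk.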
-use std::{ - future::Future, - pin::Pin, - task::{Context, Poll}, - time::Instant, -}; +use std::time::Instant; -use jsonrpsee::{server::middleware::rpc::RpcServiceT, types::Request, MethodResponse}; -use pin_project::pin_project; +use jsonrpsee::{types::Request, MethodResponse}; use prometheus_endpoint::{ register, Counter, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry, U64, @@ -77,7 +71,7 @@ impl RpcMetrics { "Total time [μs] of processed RPC calls", ) .buckets(HISTOGRAM_BUCKETS.to_vec()), - &["protocol", "method"], + &["protocol", "method", "is_rate_limited"], )?, metrics_registry, )?, @@ -97,7 +91,7 @@ impl RpcMetrics { "substrate_rpc_calls_finished", "Number of processed RPC calls (unique un-batched requests)", ), - &["protocol", "method", "is_error"], + &["protocol", "method", "is_error", "is_rate_limited"], )?, metrics_registry, )?, @@ -144,138 +138,90 @@ impl RpcMetrics { self.ws_sessions_closed.as_ref().map(|counter| counter.inc()); self.ws_sessions_time.with_label_values(&["ws"]).observe(micros as _); } -} - -/// Metrics layer. -#[derive(Clone)] -pub struct MetricsLayer { - inner: RpcMetrics, - transport_label: &'static str, -} - -impl MetricsLayer { - /// Create a new [`MetricsLayer`]. - pub fn new(metrics: RpcMetrics, transport_label: &'static str) -> Self { - Self { inner: metrics, transport_label } - } - - pub(crate) fn ws_connect(&self) { - self.inner.ws_connect(); - } - - pub(crate) fn ws_disconnect(&self, now: Instant) { - self.inner.ws_disconnect(now) - } -} - -impl tower::Layer for MetricsLayer { - type Service = Metrics; - - fn layer(&self, inner: S) -> Self::Service { - Metrics::new(inner, self.inner.clone(), self.transport_label) - } -} - -/// Metrics middleware. -#[derive(Clone)] -pub struct Metrics { - service: S, - metrics: RpcMetrics, - transport_label: &'static str, -} - -impl Metrics { - /// Create a new metrics middleware. 
- pub fn new(service: S, metrics: RpcMetrics, transport_label: &'static str) -> Metrics<S> { - Metrics { service, metrics, transport_label } - } -} - -impl<'a, S> RpcServiceT<'a> for Metrics<S> -where - S: Send + Sync + RpcServiceT<'a>, -{ - type Future = ResponseFuture<'a, S::Future>; - - fn call(&self, req: Request<'a>) -> Self::Future { - let now = Instant::now(); + pub(crate) fn on_call(&self, req: &Request, transport_label: &'static str) { log::trace!( target: "rpc_metrics", - "[{}] on_call name={} params={:?}", - self.transport_label, + "[{transport_label}] on_call name={} params={:?}", req.method_name(), req.params(), ); - self.metrics - .calls_started - .with_label_values(&[self.transport_label, req.method_name()]) + + self.calls_started + .with_label_values(&[transport_label, req.method_name()]) .inc(); + } - ResponseFuture { - fut: self.service.call(req.clone()), - metrics: self.metrics.clone(), - req, - now, - transport_label: self.transport_label, - } + pub(crate) fn on_response( + &self, + req: &Request, + rp: &MethodResponse, + is_rate_limited: bool, + transport_label: &'static str, + now: Instant, + ) { + log::trace!(target: "rpc_metrics", "[{transport_label}] on_response started_at={:?}", now); + log::trace!(target: "rpc_metrics::extra", "[{transport_label}] result={}", rp.as_result()); + + let micros = now.elapsed().as_micros(); + log::debug!( + target: "rpc_metrics", + "[{transport_label}] {} call took {} μs", + req.method_name(), + micros, + ); + self.calls_time + .with_label_values(&[ + transport_label, + req.method_name(), + if is_rate_limited { "true" } else { "false" }, + ]) + .observe(micros as _); + self.calls_finished + .with_label_values(&[ + transport_label, + req.method_name(), + // the label "is_error", so `success` should be regarded as false + // and vice-versa to be registered correctly. + if rp.is_success() { "false" } else { "true" }, + if is_rate_limited { "true" } else { "false" }, + ]) + .inc(); } } -/// Response future for metrics. -#[pin_project] -pub struct ResponseFuture<'a, F> { - #[pin] - fut: F, - metrics: RpcMetrics, - req: Request<'a>, - now: Instant, - transport_label: &'static str, +/// Metrics with transport label. +#[derive(Clone, Debug)] +pub struct Metrics { + pub(crate) inner: RpcMetrics, + pub(crate) transport_label: &'static str, } -impl<'a, F> std::fmt::Debug for ResponseFuture<'a, F> { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_str("ResponseFuture") +impl Metrics { + /// Create a new [`Metrics`].
+ pub fn new(metrics: RpcMetrics, transport_label: &'static str) -> Self { + Self { inner: metrics, transport_label } } -} -impl<'a, F: Future> Future for ResponseFuture<'a, F> { - type Output = F::Output; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.project(); + pub(crate) fn ws_connect(&self) { + self.inner.ws_connect(); + } - let res = this.fut.poll(cx); - if let Poll::Ready(rp) = &res { - let method_name = this.req.method_name(); - let transport_label = &this.transport_label; - let now = this.now; - let metrics = &this.metrics; + pub(crate) fn ws_disconnect(&self, now: Instant) { + self.inner.ws_disconnect(now) + } - log::trace!(target: "rpc_metrics", "[{transport_label}] on_response started_at={:?}", now); - log::trace!(target: "rpc_metrics::extra", "[{transport_label}] result={:?}", rp); + pub(crate) fn on_call(&self, req: &Request) { + self.inner.on_call(req, self.transport_label) + } - let micros = now.elapsed().as_micros(); - log::debug!( - target: "rpc_metrics", - "[{transport_label}] {method_name} call took {} μs", - micros, - ); - metrics - .calls_time - .with_label_values(&[transport_label, method_name]) - .observe(micros as _); - metrics - .calls_finished - .with_label_values(&[ - transport_label, - method_name, - // the label "is_error", so `success` should be regarded as false - // and vice-versa to be registrered correctly. - if rp.is_success() { "false" } else { "true" }, - ]) - .inc(); - } - res + pub(crate) fn on_response( + &self, + req: &Request, + rp: &MethodResponse, + is_rate_limited: bool, + now: Instant, + ) { + self.inner.on_response(req, rp, is_rate_limited, self.transport_label, now) } } diff --git a/substrate/client/rpc-servers/src/middleware/mod.rs b/substrate/client/rpc-servers/src/middleware/mod.rs index cac516913d327f46ccf6ba1f81c7fb447873509b..88ed8b2f433580fa2d87f1a70eacc32019027f2a 100644 --- a/substrate/client/rpc-servers/src/middleware/mod.rs +++ b/substrate/client/rpc-servers/src/middleware/mod.rs @@ -18,10 +18,131 @@ //! JSON-RPC specific middleware. -/// Grafana metrics middleware. -pub mod metrics; -/// Rate limit middleware. -pub mod rate_limit; +use std::{ + num::NonZeroU32, + time::{Duration, Instant}, +}; + +use futures::future::{BoxFuture, FutureExt}; +use governor::{clock::Clock, Jitter}; +use jsonrpsee::{ + server::middleware::rpc::RpcServiceT, + types::{ErrorObject, Id, Request}, + MethodResponse, +}; + +mod metrics; +mod rate_limit; pub use metrics::*; pub use rate_limit::*; + +const MAX_JITTER: Duration = Duration::from_millis(50); +const MAX_RETRIES: usize = 10; + +/// JSON-RPC middleware layer. +#[derive(Debug, Clone, Default)] +pub struct MiddlewareLayer { + rate_limit: Option, + metrics: Option, +} + +impl MiddlewareLayer { + /// Create an empty MiddlewareLayer. + pub fn new() -> Self { + Self::default() + } + + /// Enable new rate limit middleware enforced per minute. + pub fn with_rate_limit_per_minute(self, n: NonZeroU32) -> Self { + Self { rate_limit: Some(RateLimit::per_minute(n)), metrics: self.metrics } + } + + /// Enable metrics middleware. + pub fn with_metrics(self, metrics: Metrics) -> Self { + Self { rate_limit: self.rate_limit, metrics: Some(metrics) } + } + + /// Register a new websocket connection. + pub fn ws_connect(&self) { + self.metrics.as_ref().map(|m| m.ws_connect()); + } + + /// Register that a websocket connection was closed. 
+ pub fn ws_disconnect(&self, now: Instant) { + self.metrics.as_ref().map(|m| m.ws_disconnect(now)); + } +} + +impl<S> tower::Layer<S> for MiddlewareLayer { + type Service = Middleware<S>; + + fn layer(&self, service: S) -> Self::Service { + Middleware { service, rate_limit: self.rate_limit.clone(), metrics: self.metrics.clone() } + } +} + +/// JSON-RPC middleware that handles both metrics +/// and rate limiting. +/// +/// These live in a single middleware because the metrics +/// need to know whether a call was rate-limited, as that +/// affects the measured round-trip time of the call. +pub struct Middleware<S> { + service: S, + rate_limit: Option<RateLimit>, + metrics: Option<Metrics>, +} + +impl<'a, S> RpcServiceT<'a> for Middleware<S> +where + S: Send + Sync + RpcServiceT<'a> + Clone + 'static, +{ + type Future = BoxFuture<'a, MethodResponse>; + + fn call(&self, req: Request<'a>) -> Self::Future { + let now = Instant::now(); + + self.metrics.as_ref().map(|m| m.on_call(&req)); + + let service = self.service.clone(); + let rate_limit = self.rate_limit.clone(); + let metrics = self.metrics.clone(); + + async move { + let mut is_rate_limited = false; + + if let Some(limit) = rate_limit.as_ref() { + let mut attempts = 0; + let jitter = Jitter::up_to(MAX_JITTER); + + loop { + if attempts >= MAX_RETRIES { + return reject_too_many_calls(req.id); + } + + if let Err(rejected) = limit.inner.check() { + tokio::time::sleep(jitter + rejected.wait_time_from(limit.clock.now())) + .await; + } else { + break; + } + + is_rate_limited = true; + attempts += 1; + } + } + + let rp = service.call(req.clone()).await; + metrics.as_ref().map(|m| m.on_response(&req, &rp, is_rate_limited, now)); + + rp + } + .boxed() + } +} + +fn reject_too_many_calls(id: Id) -> MethodResponse { + MethodResponse::error(id, ErrorObject::owned(-32999, "RPC rate limit exceeded", None::<()>)) +} diff --git a/substrate/client/rpc-servers/src/middleware/rate_limit.rs b/substrate/client/rpc-servers/src/middleware/rate_limit.rs index cdcc3ebf66f7d0ab9d2ba515b0c128b55281f590..23335eb37686c30d6bf148a2fd38f75013aef606 100644 --- a/substrate/client/rpc-servers/src/middleware/rate_limit.rs +++ b/substrate/client/rpc-servers/src/middleware/rate_limit.rs @@ -16,92 +16,32 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see <https://www.gnu.org/licenses/>. -//! RPC rate limiting middleware. +//! RPC rate limit. -use std::{num::NonZeroU32, sync::Arc, time::Duration}; - -use futures::future::{BoxFuture, FutureExt}; use governor::{ - clock::{Clock, DefaultClock, QuantaClock}, + clock::{DefaultClock, QuantaClock}, middleware::NoOpMiddleware, state::{InMemoryState, NotKeyed}, - Jitter, -}; -use jsonrpsee::{ - server::middleware::rpc::RpcServiceT, - types::{ErrorObject, Id, Request}, - MethodResponse, + Quota, }; +use std::{num::NonZeroU32, sync::Arc}; type RateLimitInner = governor::RateLimiter<NotKeyed, InMemoryState, DefaultClock, NoOpMiddleware>; -const MAX_JITTER: Duration = Duration::from_millis(50); -const MAX_RETRIES: usize = 10; - -/// JSON-RPC rate limit middleware layer. +/// Rate limit. #[derive(Debug, Clone)] -pub struct RateLimitLayer(governor::Quota); - -impl RateLimitLayer { - /// Create new rate limit enforced per minute.
- pub fn per_minute(n: NonZeroU32) -> Self { - Self(governor::Quota::per_minute(n)) - } +pub struct RateLimit { + pub(crate) inner: Arc, + pub(crate) clock: QuantaClock, } -/// JSON-RPC rate limit middleware -pub struct RateLimit { - service: S, - rate_limit: Arc, - clock: QuantaClock, -} - -impl tower::Layer for RateLimitLayer { - type Service = RateLimit; - - fn layer(&self, service: S) -> Self::Service { +impl RateLimit { + /// Create a new `RateLimit` per minute. + pub fn per_minute(n: NonZeroU32) -> Self { let clock = QuantaClock::default(); - RateLimit { - service, - rate_limit: Arc::new(RateLimitInner::direct_with_clock(self.0, &clock)), + Self { + inner: Arc::new(RateLimitInner::direct_with_clock(Quota::per_minute(n), &clock)), clock, } } } - -impl<'a, S> RpcServiceT<'a> for RateLimit -where - S: Send + Sync + RpcServiceT<'a> + Clone + 'static, -{ - type Future = BoxFuture<'a, MethodResponse>; - - fn call(&self, req: Request<'a>) -> Self::Future { - let service = self.service.clone(); - let rate_limit = self.rate_limit.clone(); - let clock = self.clock.clone(); - - async move { - let mut attempts = 0; - let jitter = Jitter::up_to(MAX_JITTER); - - loop { - if attempts >= MAX_RETRIES { - break reject_too_many_calls(req.id); - } - - if let Err(rejected) = rate_limit.check() { - tokio::time::sleep(jitter + rejected.wait_time_from(clock.now())).await; - } else { - break service.call(req).await; - } - - attempts += 1; - } - } - .boxed() - } -} - -fn reject_too_many_calls(id: Id) -> MethodResponse { - MethodResponse::error(id, ErrorObject::owned(-32999, "RPC rate limit exceeded", None::<()>)) -} diff --git a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs index d63a98a5cb0d93b3633864bd3405ad38c2b6e799..e81bd4bfa0b04aa3ad9e25e47e83e9ace1c7f985 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs @@ -63,7 +63,7 @@ impl ChainHeadMockClient { BlockImportNotification::new(header.hash(), BlockOrigin::Own, header, true, None, sink); for sink in self.import_sinks.lock().iter_mut() { - sink.unbounded_send(notification.clone()).unwrap(); + let _ = sink.unbounded_send(notification.clone()); } } @@ -83,7 +83,7 @@ impl ChainHeadMockClient { let notification = FinalityNotification::from_summary(summary, sink); for sink in self.finality_sinks.lock().iter_mut() { - sink.unbounded_send(notification.clone()).unwrap(); + let _ = sink.unbounded_send(notification.clone()); } } } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 9544736d84c8e634613d0a7e363e9398f1aa0b9d..89d8c4ce2713d6b0b01411a3bf3928aa42eb363b 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -242,12 +242,12 @@ async fn follow_with_runtime() { let event: FollowEvent = get_next_event(&mut sub).await; // it is basically json-encoded substrate_test_runtime_client::runtime::VERSION - let runtime_str = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":0,\ - \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",4],\ + let runtime_str = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\ + \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",5],\ [\"0x37e397fc7c91f5e4\",2],[\"0xd2bc9897eed08f15\",3],[\"0x40fe3ad401f8959a\",6],\ 
[\"0xbc9d89904f5b923f\",1],[\"0xc6e9a76309f39b09\",2],[\"0xdd718d5cc53262d4\",1],\ [\"0xcbca25e39f142387\",2],[\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],\ - [\"0xed99c5acb25eedf5\",3],[\"0xfbc577b9d747efd6\",1]],\"transactionVersion\":1,\"stateVersion\":0}"; + [\"0xed99c5acb25eedf5\",3],[\"0xfbc577b9d747efd6\",1]],\"transactionVersion\":1,\"stateVersion\":1}"; let runtime: RuntimeVersion = serde_json::from_str(runtime_str).unwrap(); diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests.rs b/substrate/client/rpc-spec-v2/src/transaction/tests.rs deleted file mode 100644 index 382f5adeae19ebd7da366036b5092b0208b62568..0000000000000000000000000000000000000000 --- a/substrate/client/rpc-spec-v2/src/transaction/tests.rs +++ /dev/null @@ -1,238 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use super::*; -use crate::{ - chain_head::test_utils::ChainHeadMockClient, hex_string, - transaction::TransactionBroadcast as RpcTransactionBroadcast, -}; -use assert_matches::assert_matches; -use codec::Encode; -use futures::Future; -use jsonrpsee::{rpc_params, MethodsError as Error, RpcModule}; -use sc_transaction_pool::*; -use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool, TransactionPool}; -use sp_core::{testing::TaskExecutor, traits::SpawnNamed}; -use std::{pin::Pin, sync::Arc, time::Duration}; -use substrate_test_runtime_client::{prelude::*, AccountKeyring::*, Client}; -use substrate_test_runtime_transaction_pool::{uxt, TestApi}; -use tokio::sync::mpsc; - -type Block = substrate_test_runtime_client::runtime::Block; - -/// Wrap the `TaskExecutor` to know when the broadcast future is dropped. -#[derive(Clone)] -struct TaskExecutorBroadcast { - executor: TaskExecutor, - sender: mpsc::UnboundedSender<()>, -} - -/// The channel that receives events when the broadcast futures are dropped. -type TaskExecutorRecv = mpsc::UnboundedReceiver<()>; - -impl TaskExecutorBroadcast { - /// Construct a new `TaskExecutorBroadcast` and a receiver to know when the broadcast futures - /// are dropped. 
- fn new() -> (Self, TaskExecutorRecv) { - let (sender, recv) = mpsc::unbounded_channel(); - - (Self { executor: TaskExecutor::new(), sender }, recv) - } -} - -impl SpawnNamed for TaskExecutorBroadcast { - fn spawn( - &self, - name: &'static str, - group: Option<&'static str>, - future: futures::future::BoxFuture<'static, ()>, - ) { - let sender = self.sender.clone(); - let future = Box::pin(async move { - future.await; - let _ = sender.send(()); - }); - - self.executor.spawn(name, group, future) - } - - fn spawn_blocking( - &self, - name: &'static str, - group: Option<&'static str>, - future: futures::future::BoxFuture<'static, ()>, - ) { - let sender = self.sender.clone(); - let future = Box::pin(async move { - future.await; - let _ = sender.send(()); - }); - - self.executor.spawn_blocking(name, group, future) - } -} - -/// Initial Alice account nonce. -const ALICE_NONCE: u64 = 209; - -fn create_basic_pool_with_genesis( - test_api: Arc, -) -> (BasicPool, Pin + Send>>) { - let genesis_hash = { - test_api - .chain() - .read() - .block_by_number - .get(&0) - .map(|blocks| blocks[0].0.header.hash()) - .expect("there is block 0. qed") - }; - BasicPool::new_test(test_api, genesis_hash, genesis_hash) -} - -fn maintained_pool() -> (BasicPool, Arc, futures::executor::ThreadPool) { - let api = Arc::new(TestApi::with_alice_nonce(ALICE_NONCE)); - let (pool, background_task) = create_basic_pool_with_genesis(api.clone()); - - let thread_pool = futures::executor::ThreadPool::new().unwrap(); - thread_pool.spawn_ok(background_task); - (pool, api, thread_pool) -} - -fn setup_api() -> ( - Arc, - Arc>, - Arc>>, - RpcModule< - TransactionBroadcast, ChainHeadMockClient>>, - >, - TaskExecutorRecv, -) { - let (pool, api, _) = maintained_pool(); - let pool = Arc::new(pool); - - let builder = TestClientBuilder::new(); - let client = Arc::new(builder.build()); - let client_mock = Arc::new(ChainHeadMockClient::new(client.clone())); - - let (task_executor, executor_recv) = TaskExecutorBroadcast::new(); - - let tx_api = - RpcTransactionBroadcast::new(client_mock.clone(), pool.clone(), Arc::new(task_executor)) - .into_rpc(); - - (api, pool, client_mock, tx_api, executor_recv) -} - -#[tokio::test] -async fn tx_broadcast_enters_pool() { - let (api, pool, client_mock, tx_api, _) = setup_api(); - - // Start at block 1. - let block_1_header = api.push_block(1, vec![], true); - - let uxt = uxt(Alice, ALICE_NONCE); - let xt = hex_string(&uxt.encode()); - - let operation_id: String = - tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); - - // Announce block 1 to `transaction_unstable_broadcast`. - client_mock.trigger_import_stream(block_1_header).await; - - // Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction pool. - - // TODO: Improve testability by extending the `transaction_unstable_broadcast` with - // a middleware trait that intercepts the transaction status for testing. - let mut num_retries = 12; - while num_retries > 0 && pool.status().ready != 1 { - tokio::time::sleep(Duration::from_secs(5)).await; - num_retries -= 1; - } - assert_eq!(1, pool.status().ready); - assert_eq!(uxt.encode().len(), pool.status().ready_bytes); - - // Import block 2 with the transaction included. - let block_2_header = api.push_block(2, vec![uxt.clone()], true); - let block_2 = block_2_header.hash(); - - // Announce block 2 to the pool. 
- let event = ChainEvent::NewBestBlock { hash: block_2, tree_route: None }; - pool.maintain(event).await; - - assert_eq!(0, pool.status().ready); - - // Stop call can still be made. - let _: () = tx_api - .call("transaction_unstable_stop", rpc_params![&operation_id]) - .await - .unwrap(); -} - -#[tokio::test] -async fn tx_broadcast_invalid_tx() { - let (_, pool, _, tx_api, mut exec_recv) = setup_api(); - - // Invalid parameters. - let err = tx_api - .call::<_, serde_json::Value>("transaction_unstable_broadcast", [1u8]) - .await - .unwrap_err(); - assert_matches!(err, - Error::JsonRpc(err) if err.code() == super::error::json_rpc_spec::INVALID_PARAM_ERROR && err.message() == "Invalid params" - ); - - assert_eq!(0, pool.status().ready); - - // Invalid transaction that cannot be decoded. The broadcast silently exits. - let xt = "0xdeadbeef"; - let operation_id: String = - tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); - - assert_eq!(0, pool.status().ready); - - // Await the broadcast future to exit. - // Without this we'd be subject to races, where we try to call the stop before the tx is - // dropped. - exec_recv.recv().await.unwrap(); - - // The broadcast future was dropped, and the operation is no longer active. - // When the operation is not active, either from the tx being finalized or a - // terminal error; the stop method should return an error. - let err = tx_api - .call::<_, serde_json::Value>("transaction_unstable_stop", rpc_params![&operation_id]) - .await - .unwrap_err(); - assert_matches!(err, - Error::JsonRpc(err) if err.code() == super::error::json_rpc_spec::INVALID_PARAM_ERROR && err.message() == "Invalid operation id" - ); -} - -#[tokio::test] -async fn tx_invalid_stop() { - let (_, _, _, tx_api, _) = setup_api(); - - // Make an invalid stop call. - let err = tx_api - .call::<_, serde_json::Value>("transaction_unstable_stop", ["invalid_operation_id"]) - .await - .unwrap_err(); - assert_matches!(err, - Error::JsonRpc(err) if err.code() == super::error::json_rpc_spec::INVALID_PARAM_ERROR && err.message() == "Invalid operation id" - ); -} diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/executor.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/executor.rs new file mode 100644 index 0000000000000000000000000000000000000000..ff9aca79887c3f29025697f9f5832c914735e0d6 --- /dev/null +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/executor.rs @@ -0,0 +1,100 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use sp_core::{testing::TaskExecutor, traits::SpawnNamed}; +use std::sync::{atomic::AtomicUsize, Arc}; +use tokio::sync::mpsc; + +/// Wrap the `TaskExecutor` to know when the broadcast future is dropped. 
+#[derive(Clone)] +pub struct TaskExecutorBroadcast { + executor: TaskExecutor, + sender: mpsc::UnboundedSender<()>, + num_tasks: Arc, +} + +/// The channel that receives events when the broadcast futures are dropped. +pub type TaskExecutorRecv = mpsc::UnboundedReceiver<()>; + +/// The state of the `TaskExecutorBroadcast`. +pub struct TaskExecutorState { + pub recv: TaskExecutorRecv, + pub num_tasks: Arc, +} + +impl TaskExecutorState { + pub fn num_tasks(&self) -> usize { + self.num_tasks.load(std::sync::atomic::Ordering::Acquire) + } +} + +impl TaskExecutorBroadcast { + /// Construct a new `TaskExecutorBroadcast` and a receiver to know when the broadcast futures + /// are dropped. + pub fn new() -> (Self, TaskExecutorState) { + let (sender, recv) = mpsc::unbounded_channel(); + let num_tasks = Arc::new(AtomicUsize::new(0)); + + ( + Self { executor: TaskExecutor::new(), sender, num_tasks: num_tasks.clone() }, + TaskExecutorState { recv, num_tasks }, + ) + } +} + +impl SpawnNamed for TaskExecutorBroadcast { + fn spawn( + &self, + name: &'static str, + group: Option<&'static str>, + future: futures::future::BoxFuture<'static, ()>, + ) { + let sender = self.sender.clone(); + let num_tasks = self.num_tasks.clone(); + + let future = Box::pin(async move { + num_tasks.fetch_add(1, std::sync::atomic::Ordering::AcqRel); + future.await; + num_tasks.fetch_sub(1, std::sync::atomic::Ordering::AcqRel); + + let _ = sender.send(()); + }); + + self.executor.spawn(name, group, future) + } + + fn spawn_blocking( + &self, + name: &'static str, + group: Option<&'static str>, + future: futures::future::BoxFuture<'static, ()>, + ) { + let sender = self.sender.clone(); + let num_tasks = self.num_tasks.clone(); + + let future = Box::pin(async move { + num_tasks.fetch_add(1, std::sync::atomic::Ordering::AcqRel); + future.await; + num_tasks.fetch_sub(1, std::sync::atomic::Ordering::AcqRel); + + let _ = sender.send(()); + }); + + self.executor.spawn_blocking(name, group, future) + } +} diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/middleware_pool.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/middleware_pool.rs new file mode 100644 index 0000000000000000000000000000000000000000..aa8ac572dec9d8d6de9212fcb94c97ce2b804da4 --- /dev/null +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/middleware_pool.rs @@ -0,0 +1,187 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
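The new `executor.rs` above wraps `TaskExecutor` so that tests can both wait for a spawned broadcast future to complete and check how many are still running. A small usage sketch, assuming the items from that file are in scope (the test name is illustrative only):

    use sp_core::traits::SpawnNamed;

    #[tokio::test]
    async fn broadcast_task_counter_drops_to_zero() {
        let (executor, mut state) = TaskExecutorBroadcast::new();

        // Spawn a trivial future through the wrapper; the counter is incremented
        // while it runs and decremented when it completes.
        executor.spawn("test-task", None, Box::pin(async {}));

        // The wrapper signals on the channel once the future has finished,
        // at which point no tasks remain.
        let _ = state.recv.recv().await;
        assert_eq!(state.num_tasks(), 0);
    }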
+ +use codec::Encode; +use futures::Future; +use sc_transaction_pool::BasicPool; +use sc_transaction_pool_api::{ + ImportNotificationStream, PoolFuture, PoolStatus, ReadyTransactions, TransactionFor, + TransactionPool, TransactionSource, TransactionStatusStreamFor, TxHash, +}; + +use crate::hex_string; +use futures::{FutureExt, StreamExt}; + +use sp_runtime::traits::{Block as BlockT, NumberFor}; +use std::{collections::HashMap, pin::Pin, sync::Arc}; +use substrate_test_runtime_transaction_pool::TestApi; +use tokio::sync::mpsc; + +pub type Block = substrate_test_runtime_client::runtime::Block; + +pub type TxTestPool = MiddlewarePool; +pub type TxStatusType = sc_transaction_pool_api::TransactionStatus< + sc_transaction_pool_api::TxHash, + sc_transaction_pool_api::BlockHash, +>; +pub type TxStatusTypeTest = TxStatusType; + +/// The type of the event that the middleware captures. +#[derive(Debug, PartialEq)] +pub enum MiddlewarePoolEvent { + TransactionStatus { + transaction: String, + status: sc_transaction_pool_api::TransactionStatus< + ::Hash, + ::Hash, + >, + }, + PoolError { + transaction: String, + err: String, + }, +} + +/// The channel that receives events when the broadcast futures are dropped. +pub type MiddlewarePoolRecv = mpsc::UnboundedReceiver; + +/// Add a middleware to the transaction pool. +/// +/// This wraps the `submit_and_watch` to gain access to the events. +pub struct MiddlewarePool { + pub inner_pool: Arc>, + /// Send the middleware events to the test. + sender: mpsc::UnboundedSender, +} + +impl MiddlewarePool { + /// Construct a new [`MiddlewarePool`]. + pub fn new(pool: Arc>) -> (Self, MiddlewarePoolRecv) { + let (sender, recv) = mpsc::unbounded_channel(); + (MiddlewarePool { inner_pool: pool, sender }, recv) + } +} + +impl TransactionPool for MiddlewarePool { + type Block = as TransactionPool>::Block; + type Hash = as TransactionPool>::Hash; + type InPoolTransaction = as TransactionPool>::InPoolTransaction; + type Error = as TransactionPool>::Error; + + fn submit_at( + &self, + at: ::Hash, + source: TransactionSource, + xts: Vec>, + ) -> PoolFuture, Self::Error>>, Self::Error> { + self.inner_pool.submit_at(at, source, xts) + } + + fn submit_one( + &self, + at: ::Hash, + source: TransactionSource, + xt: TransactionFor, + ) -> PoolFuture, Self::Error> { + self.inner_pool.submit_one(at, source, xt) + } + + fn submit_and_watch( + &self, + at: ::Hash, + source: TransactionSource, + xt: TransactionFor, + ) -> PoolFuture>>, Self::Error> { + let pool = self.inner_pool.clone(); + let sender = self.sender.clone(); + let transaction = hex_string(&xt.encode()); + + async move { + let watcher = match pool.submit_and_watch(at, source, xt).await { + Ok(watcher) => watcher, + Err(err) => { + let _ = sender.send(MiddlewarePoolEvent::PoolError { + transaction: transaction.clone(), + err: err.to_string(), + }); + return Err(err); + }, + }; + + let watcher = watcher.map(move |status| { + let sender = sender.clone(); + let transaction = transaction.clone(); + + let _ = sender.send(MiddlewarePoolEvent::TransactionStatus { + transaction, + status: status.clone(), + }); + + status + }); + + Ok(watcher.boxed()) + } + .boxed() + } + + fn remove_invalid(&self, hashes: &[TxHash]) -> Vec> { + self.inner_pool.remove_invalid(hashes) + } + + fn status(&self) -> PoolStatus { + self.inner_pool.status() + } + + fn import_notification_stream(&self) -> ImportNotificationStream> { + self.inner_pool.import_notification_stream() + } + + fn hash_of(&self, xt: &TransactionFor) -> TxHash { + 
self.inner_pool.hash_of(xt) + } + + fn on_broadcasted(&self, propagations: HashMap, Vec>) { + self.inner_pool.on_broadcasted(propagations) + } + + fn ready_transaction(&self, hash: &TxHash) -> Option> { + self.inner_pool.ready_transaction(hash) + } + + fn ready_at( + &self, + at: NumberFor, + ) -> Pin< + Box< + dyn Future< + Output = Box> + Send>, + > + Send, + >, + > { + self.inner_pool.ready_at(at) + } + + fn ready(&self) -> Box> + Send> { + self.inner_pool.ready() + } + + fn futures(&self) -> Vec { + self.inner_pool.futures() + } +} diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/mod.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..ab0caaf906fd08049ccf6c9f421d3134d3689322 --- /dev/null +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/mod.rs @@ -0,0 +1,24 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +mod executor; +mod middleware_pool; +#[macro_use] +mod setup; + +mod transaction_broadcast_tests; diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/setup.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/setup.rs new file mode 100644 index 0000000000000000000000000000000000000000..04ee7b9b4c94c4f3eac763ad06ba9b8964c2287e --- /dev/null +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/setup.rs @@ -0,0 +1,120 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::{ + chain_head::test_utils::ChainHeadMockClient, + transaction::{ + api::TransactionBroadcastApiServer, + tests::executor::{TaskExecutorBroadcast, TaskExecutorState}, + TransactionBroadcast as RpcTransactionBroadcast, + }, +}; +use futures::Future; +use jsonrpsee::RpcModule; +use sc_transaction_pool::*; +use std::{pin::Pin, sync::Arc}; +use substrate_test_runtime_client::{prelude::*, Client}; +use substrate_test_runtime_transaction_pool::TestApi; + +use crate::transaction::tests::middleware_pool::{MiddlewarePool, MiddlewarePoolRecv}; + +pub type Block = substrate_test_runtime_client::runtime::Block; + +/// Initial Alice account nonce. 
+pub const ALICE_NONCE: u64 = 209; + +fn create_basic_pool_with_genesis( + test_api: Arc, + options: Options, +) -> (BasicPool, Pin + Send>>) { + let genesis_hash = { + test_api + .chain() + .read() + .block_by_number + .get(&0) + .map(|blocks| blocks[0].0.header.hash()) + .expect("there is block 0. qed") + }; + BasicPool::new_test(test_api, genesis_hash, genesis_hash, options) +} + +fn maintained_pool( + options: Options, +) -> (BasicPool, Arc, futures::executor::ThreadPool) { + let api = Arc::new(TestApi::with_alice_nonce(ALICE_NONCE)); + let (pool, background_task) = create_basic_pool_with_genesis(api.clone(), options); + + let thread_pool = futures::executor::ThreadPool::new().unwrap(); + thread_pool.spawn_ok(background_task); + (pool, api, thread_pool) +} + +pub fn setup_api( + options: Options, +) -> ( + Arc, + Arc, + Arc>>, + RpcModule>>>, + TaskExecutorState, + MiddlewarePoolRecv, +) { + let (pool, api, _) = maintained_pool(options); + let (pool, pool_state) = MiddlewarePool::new(Arc::new(pool).clone()); + let pool = Arc::new(pool); + + let builder = TestClientBuilder::new(); + let client = Arc::new(builder.build()); + let client_mock = Arc::new(ChainHeadMockClient::new(client.clone())); + + let (task_executor, executor_recv) = TaskExecutorBroadcast::new(); + + let tx_api = + RpcTransactionBroadcast::new(client_mock.clone(), pool.clone(), Arc::new(task_executor)) + .into_rpc(); + + (api, pool, client_mock, tx_api, executor_recv, pool_state) +} + +/// Get the next event from the provided middleware in at most 5 seconds. +macro_rules! get_next_event { + ($middleware:expr) => { + tokio::time::timeout(std::time::Duration::from_secs(5), $middleware.recv()) + .await + .unwrap() + .unwrap() + }; +} + +/// Collect the next number of transaction events from the provided middleware. +macro_rules! get_next_tx_events { + ($middleware:expr, $num:expr) => {{ + let mut events = std::collections::HashMap::new(); + for _ in 0..$num { + let event = get_next_event!($middleware); + match event { + crate::transaction::tests::middleware_pool::MiddlewarePoolEvent::TransactionStatus { transaction, status } => { + events.entry(transaction).or_insert_with(|| vec![]).push(status); + }, + other => panic!("Expected TransactionStatus, received {:?}", other), + }; + } + events + }}; +} diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..690a1a64d7460c656f5180c59120c951dfb04472 --- /dev/null +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs @@ -0,0 +1,523 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
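`setup.rs` above hands every test the same six handles (test API, middleware-wrapped pool, mock client, RPC module, executor state, pool event receiver) plus timeout-guarded macros for consuming events, so a missing event fails the test after five seconds instead of hanging. The tests added below all share roughly this shape; a condensed sketch, assuming the same imports as `transaction_broadcast_tests.rs` and mirroring its `tx_broadcast_enters_pool` test:

    #[tokio::test]
    async fn example_broadcast_reaches_ready() {
        let (api, _pool, client_mock, tx_api, _exec_state, mut pool_middleware) =
            setup_api(Default::default());

        // Broadcast a transaction, then announce a block so the broadcast
        // future submits it to the pool.
        let xt = hex_string(&uxt(Alice, ALICE_NONCE).encode());
        let _id: String =
            tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap();
        client_mock.trigger_import_stream(api.push_block(1, vec![], true)).await;

        // The middleware pool reports every status change of the submitted tx.
        let event = get_next_event!(&mut pool_middleware);
        assert_eq!(
            event,
            MiddlewarePoolEvent::TransactionStatus {
                transaction: xt,
                status: TxStatusTypeTest::Ready,
            }
        );
    }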
+ +use crate::{hex_string, transaction::error::json_rpc_spec}; +use assert_matches::assert_matches; +use codec::Encode; +use jsonrpsee::{rpc_params, MethodsError as Error}; +use sc_transaction_pool::{Options, PoolLimit}; +use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool, TransactionPool}; +use std::sync::Arc; +use substrate_test_runtime_client::AccountKeyring::*; +use substrate_test_runtime_transaction_pool::uxt; + +// Test helpers. +use crate::transaction::tests::{ + middleware_pool::{MiddlewarePoolEvent, TxStatusTypeTest}, + setup::{setup_api, ALICE_NONCE}, +}; + +#[tokio::test] +async fn tx_broadcast_enters_pool() { + let (api, pool, client_mock, tx_api, mut exec_middleware, mut pool_middleware) = + setup_api(Default::default()); + + // Start at block 1. + let block_1_header = api.push_block(1, vec![], true); + + let uxt = uxt(Alice, ALICE_NONCE); + let xt = hex_string(&uxt.encode()); + + let operation_id: String = + tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); + + // Announce block 1 to `transaction_unstable_broadcast`. + client_mock.trigger_import_stream(block_1_header).await; + + // Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction pool. + let event = get_next_event!(&mut pool_middleware); + assert_eq!( + event, + MiddlewarePoolEvent::TransactionStatus { + transaction: xt.clone(), + status: TxStatusTypeTest::Ready + } + ); + + assert_eq!(1, pool.inner_pool.status().ready); + assert_eq!(uxt.encode().len(), pool.inner_pool.status().ready_bytes); + + // Import block 2 with the transaction included. + let block_2_header = api.push_block(2, vec![uxt.clone()], true); + let block_2 = block_2_header.hash(); + + // Announce block 2 to the pool. + let event = ChainEvent::NewBestBlock { hash: block_2, tree_route: None }; + pool.inner_pool.maintain(event).await; + assert_eq!(0, pool.inner_pool.status().ready); + + let event = get_next_event!(&mut pool_middleware); + assert_eq!( + event, + MiddlewarePoolEvent::TransactionStatus { + transaction: xt.clone(), + status: TxStatusTypeTest::InBlock((block_2, 0)) + } + ); + + // The future broadcast awaits for the finalized status to be reached. + // Force the future to exit by calling stop. + let _: () = tx_api + .call("transaction_unstable_stop", rpc_params![&operation_id]) + .await + .unwrap(); + + // Ensure the broadcast future finishes. + let _ = get_next_event!(&mut exec_middleware.recv); + assert_eq!(0, exec_middleware.num_tasks()); +} + +#[tokio::test] +async fn tx_broadcast_invalid_tx() { + let (_, pool, _, tx_api, mut exec_middleware, _) = setup_api(Default::default()); + + // Invalid parameters. + let err = tx_api + .call::<_, serde_json::Value>("transaction_unstable_broadcast", [1u8]) + .await + .unwrap_err(); + assert_matches!(err, + Error::JsonRpc(err) if err.code() == json_rpc_spec::INVALID_PARAM_ERROR && err.message() == "Invalid params" + ); + + assert_eq!(0, pool.status().ready); + + // Invalid transaction that cannot be decoded. The broadcast silently exits. + let xt = "0xdeadbeef"; + let operation_id: String = + tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); + + assert_eq!(0, pool.status().ready); + + // Await the broadcast future to exit. + // Without this we'd be subject to races, where we try to call the stop before the tx is + // dropped. 
+ let _ = get_next_event!(&mut exec_middleware.recv); + assert_eq!(0, exec_middleware.num_tasks()); + + // The broadcast future was dropped, and the operation is no longer active. + // When the operation is not active, either from the tx being finalized or a + // terminal error; the stop method should return an error. + let err = tx_api + .call::<_, serde_json::Value>("transaction_unstable_stop", rpc_params![&operation_id]) + .await + .unwrap_err(); + assert_matches!(err, + Error::JsonRpc(err) if err.code() == json_rpc_spec::INVALID_PARAM_ERROR && err.message() == "Invalid operation id" + ); +} + +#[tokio::test] +async fn tx_stop_with_invalid_operation_id() { + let (_, _, _, tx_api, _, _) = setup_api(Default::default()); + + // Make an invalid stop call. + let err = tx_api + .call::<_, serde_json::Value>("transaction_unstable_stop", ["invalid_operation_id"]) + .await + .unwrap_err(); + assert_matches!(err, + Error::JsonRpc(err) if err.code() == json_rpc_spec::INVALID_PARAM_ERROR && err.message() == "Invalid operation id" + ); +} + +#[tokio::test] +async fn tx_broadcast_resubmits_future_nonce_tx() { + let (api, pool, client_mock, tx_api, mut exec_middleware, mut pool_middleware) = + setup_api(Default::default()); + + // Start at block 1. + let block_1_header = api.push_block(1, vec![], true); + let block_1 = block_1_header.hash(); + + let current_uxt = uxt(Alice, ALICE_NONCE); + let current_xt = hex_string(¤t_uxt.encode()); + // This lives in the future. + let future_uxt = uxt(Alice, ALICE_NONCE + 1); + let future_xt = hex_string(&future_uxt.encode()); + + let future_operation_id: String = tx_api + .call("transaction_unstable_broadcast", rpc_params![&future_xt]) + .await + .unwrap(); + + // Announce block 1 to `transaction_unstable_broadcast`. + client_mock.trigger_import_stream(block_1_header).await; + + // Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction pool. + let event = get_next_event!(&mut pool_middleware); + assert_eq!( + event, + MiddlewarePoolEvent::TransactionStatus { + transaction: future_xt.clone(), + status: TxStatusTypeTest::Future + } + ); + + let event = ChainEvent::NewBestBlock { hash: block_1, tree_route: None }; + pool.inner_pool.maintain(event).await; + assert_eq!(0, pool.inner_pool.status().ready); + // Ensure the tx is in the future. + assert_eq!(1, pool.inner_pool.status().future); + + let block_2_header = api.push_block(2, vec![], true); + let block_2 = block_2_header.hash(); + + let operation_id: String = tx_api + .call("transaction_unstable_broadcast", rpc_params![¤t_xt]) + .await + .unwrap(); + assert_ne!(future_operation_id, operation_id); + + // Announce block 2 to `transaction_unstable_broadcast`. + client_mock.trigger_import_stream(block_2_header).await; + + // Collect the events of both transactions. + let events = get_next_tx_events!(&mut pool_middleware, 2); + // Transactions entered the ready queue. + assert_eq!(events.get(¤t_xt).unwrap(), &vec![TxStatusTypeTest::Ready]); + assert_eq!(events.get(&future_xt).unwrap(), &vec![TxStatusTypeTest::Ready]); + + let event = ChainEvent::NewBestBlock { hash: block_2, tree_route: None }; + pool.inner_pool.maintain(event).await; + assert_eq!(2, pool.inner_pool.status().ready); + assert_eq!(0, pool.inner_pool.status().future); + + // Finalize transactions. 
+ let block_3_header = api.push_block(3, vec![current_uxt, future_uxt], true);
+ let block_3 = block_3_header.hash();
+ client_mock.trigger_import_stream(block_3_header).await;
+
+ let event = ChainEvent::Finalized { hash: block_3, tree_route: Arc::from(vec![]) };
+ pool.inner_pool.maintain(event).await;
+ assert_eq!(0, pool.inner_pool.status().ready);
+ assert_eq!(0, pool.inner_pool.status().future);
+
+ let events = get_next_tx_events!(&mut pool_middleware, 4);
+ assert_eq!(
+ events.get(&current_xt).unwrap(),
+ &vec![TxStatusTypeTest::InBlock((block_3, 0)), TxStatusTypeTest::Finalized((block_3, 0))]
+ );
+ assert_eq!(
+ events.get(&future_xt).unwrap(),
+ &vec![TxStatusTypeTest::InBlock((block_3, 1)), TxStatusTypeTest::Finalized((block_3, 1))]
+ );
+
+ // Both broadcast futures must exit.
+ let _ = get_next_event!(&mut exec_middleware.recv);
+ let _ = get_next_event!(&mut exec_middleware.recv);
+ assert_eq!(0, exec_middleware.num_tasks());
+}
+
+/// This test is similar to `tx_broadcast_enters_pool`.
+/// However, the last block is announced as finalized to force the
+/// broadcast future to exit before `stop` is called.
+#[tokio::test]
+async fn tx_broadcast_stop_after_broadcast_finishes() {
+ let (api, pool, client_mock, tx_api, mut exec_middleware, mut pool_middleware) =
+ setup_api(Default::default());
+
+ // Start at block 1.
+ let block_1_header = api.push_block(1, vec![], true);
+
+ let uxt = uxt(Alice, ALICE_NONCE);
+ let xt = hex_string(&uxt.encode());
+
+ let operation_id: String =
+ tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap();
+
+ // Announce block 1 to `transaction_unstable_broadcast`.
+ client_mock.trigger_import_stream(block_1_header).await;
+
+ // Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction pool.
+ let event = get_next_event!(&mut pool_middleware);
+ assert_eq!(
+ event,
+ MiddlewarePoolEvent::TransactionStatus {
+ transaction: xt.clone(),
+ status: TxStatusTypeTest::Ready
+ }
+ );
+
+ assert_eq!(1, pool.inner_pool.status().ready);
+ assert_eq!(uxt.encode().len(), pool.inner_pool.status().ready_bytes);
+
+ // Import block 2 with the transaction included.
+ let block_2_header = api.push_block(2, vec![uxt.clone()], true);
+ let block_2 = block_2_header.hash();
+
+ // Announce block 2 to the pool.
+ let event = ChainEvent::Finalized { hash: block_2, tree_route: Arc::from(vec![]) };
+ pool.inner_pool.maintain(event).await;
+
+ assert_eq!(0, pool.inner_pool.status().ready);
+
+ let event = get_next_event!(&mut pool_middleware);
+ assert_eq!(
+ event,
+ MiddlewarePoolEvent::TransactionStatus {
+ transaction: xt.clone(),
+ status: TxStatusTypeTest::InBlock((block_2, 0))
+ }
+ );
+
+ let event = get_next_event!(&mut pool_middleware);
+ assert_eq!(
+ event,
+ MiddlewarePoolEvent::TransactionStatus {
+ transaction: xt.clone(),
+ status: TxStatusTypeTest::Finalized((block_2, 0))
+ }
+ );
+
+ // Ensure the broadcast future terminated properly.
+ let _ = get_next_event!(&mut exec_middleware.recv);
+ assert_eq!(0, exec_middleware.num_tasks());
+
+ // The operation ID is no longer valid, check that the broadcast future
+ // cleared out the inner state of the operation.
+ let err = tx_api
+ .call::<_, serde_json::Value>("transaction_unstable_stop", rpc_params![&operation_id])
+ .await
+ .unwrap_err();
+ assert_matches!(err,
+ Error::JsonRpc(err) if err.code() == json_rpc_spec::INVALID_PARAM_ERROR && err.message() == "Invalid operation id"
+ );
+}
+
+#[tokio::test]
+async fn tx_broadcast_resubmits_invalid_tx() {
+ let limits = PoolLimit { count: 8192, total_bytes: 20 * 1024 * 1024 };
+ let options = Options {
+ ready: limits.clone(),
+ future: limits,
+ reject_future_transactions: false,
+ // This ensures that a transaction is not banned.
+ ban_time: std::time::Duration::ZERO,
+ };
+
+ let (api, pool, client_mock, tx_api, mut exec_middleware, mut pool_middleware) =
+ setup_api(options);
+
+ let uxt = uxt(Alice, ALICE_NONCE);
+ let xt = hex_string(&uxt.encode());
+ let _operation_id: String =
+ tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap();
+
+ let block_1_header = api.push_block(1, vec![], true);
+ let block_1 = block_1_header.hash();
+ // Announce block 1 to `transaction_unstable_broadcast`.
+ client_mock.trigger_import_stream(block_1_header).await;
+
+ // Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction pool.
+ let event = get_next_event!(&mut pool_middleware);
+ assert_eq!(
+ event,
+ MiddlewarePoolEvent::TransactionStatus {
+ transaction: xt.clone(),
+ status: TxStatusTypeTest::Ready,
+ }
+ );
+ assert_eq!(1, pool.inner_pool.status().ready);
+ assert_eq!(uxt.encode().len(), pool.inner_pool.status().ready_bytes);
+
+ // Mark the transaction as invalid from the API, causing a temporary ban.
+ api.add_invalid(&uxt);
+
+ // Push an event to the pool to ensure the transaction is excluded.
+ let event = ChainEvent::NewBestBlock { hash: block_1, tree_route: None };
+ pool.inner_pool.maintain(event).await;
+ assert_eq!(1, pool.inner_pool.status().ready);
+
+ // Ensure the `transaction_unstable_broadcast` is aware of the invalid transaction.
+ let event = get_next_event!(&mut pool_middleware);
+ // Because we have received an `Invalid` status, we try to broadcast the transaction with the
+ // next announced block.
+ assert_eq!(
+ event,
+ MiddlewarePoolEvent::TransactionStatus {
+ transaction: xt.clone(),
+ status: TxStatusTypeTest::Invalid
+ }
+ );
+
+ // Import block 2.
+ let block_2_header = api.push_block(2, vec![], true);
+ client_mock.trigger_import_stream(block_2_header).await;
+
+ // Ensure we propagate the temporary ban error to `submit_and_watch`.
+ // This ensures we'll loop again with the next announced block and try to resubmit the
+ // transaction. The transaction remains temporarily banned until the pool is maintained.
+ let event = get_next_event!(&mut pool_middleware);
+ assert_matches!(event, MiddlewarePoolEvent::PoolError { transaction, err } if transaction == xt && err.contains("Transaction temporarily Banned"));
+
+ // Import block 3.
+ let block_3_header = api.push_block(3, vec![], true);
+ let block_3 = block_3_header.hash();
+ // Remove the invalid transaction from the pool to allow it to pass through.
+ api.remove_invalid(&uxt);
+ let event = ChainEvent::NewBestBlock { hash: block_3, tree_route: None };
+ // We have to maintain the pool to ensure the transaction is no longer invalid.
+ // This clears out the banned transactions.
+ pool.inner_pool.maintain(event).await;
+ assert_eq!(0, pool.inner_pool.status().ready);
+
+ // Announce block to `transaction_unstable_broadcast`.
+ client_mock.trigger_import_stream(block_3_header).await;
+
+ let event = get_next_event!(&mut pool_middleware);
+ assert_eq!(
+ event,
+ MiddlewarePoolEvent::TransactionStatus {
+ transaction: xt.clone(),
+ status: TxStatusTypeTest::Ready,
+ }
+ );
+ assert_eq!(1, pool.inner_pool.status().ready);
+
+ let block_4_header = api.push_block(4, vec![uxt], true);
+ let block_4 = block_4_header.hash();
+ let event = ChainEvent::Finalized { hash: block_4, tree_route: Arc::from(vec![]) };
+ pool.inner_pool.maintain(event).await;
+
+ let event = get_next_event!(&mut pool_middleware);
+ assert_eq!(
+ event,
+ MiddlewarePoolEvent::TransactionStatus {
+ transaction: xt.clone(),
+ status: TxStatusTypeTest::InBlock((block_4, 0)),
+ }
+ );
+ let event = get_next_event!(&mut pool_middleware);
+ assert_eq!(
+ event,
+ MiddlewarePoolEvent::TransactionStatus {
+ transaction: xt.clone(),
+ status: TxStatusTypeTest::Finalized((block_4, 0)),
+ }
+ );
+
+ // Ensure the broadcast future terminated properly.
+ let _ = get_next_event!(&mut exec_middleware.recv);
+ assert_eq!(0, exec_middleware.num_tasks());
+}
+
+/// This is similar to `tx_broadcast_resubmits_invalid_tx`.
+/// However, it forces the tx to be resubmitted because of the pool
+/// limits, which is a different code path than the invalid tx.
+#[tokio::test]
+async fn tx_broadcast_resubmits_dropped_tx() {
+ let limits = PoolLimit { count: 1, total_bytes: 1000 };
+ let options = Options {
+ ready: limits.clone(),
+ future: limits,
+ reject_future_transactions: false,
+ // This ensures that a transaction is not banned.
+ ban_time: std::time::Duration::ZERO,
+ };
+
+ let (api, pool, client_mock, tx_api, _, mut pool_middleware) = setup_api(options);
+
+ let current_uxt = uxt(Alice, ALICE_NONCE);
+ let current_xt = hex_string(&current_uxt.encode());
+ // This lives in the future.
+ let future_uxt = uxt(Alice, ALICE_NONCE + 1);
+ let future_xt = hex_string(&future_uxt.encode());
+
+ // By default the `validate_transaction` mock uses priority 1 for
+ // transactions. Bump the priority to ensure other transactions
+ // are immediately dropped.
+ api.set_priority(&current_uxt, 10);
+
+ let current_operation_id: String = tx_api
+ .call("transaction_unstable_broadcast", rpc_params![&current_xt])
+ .await
+ .unwrap();
+
+ // Announce block 1 to `transaction_unstable_broadcast`.
+ let block_1_header = api.push_block(1, vec![], true);
+ let event =
+ ChainEvent::Finalized { hash: block_1_header.hash(), tree_route: Arc::from(vec![]) };
+ pool.inner_pool.maintain(event).await;
+ client_mock.trigger_import_stream(block_1_header).await;
+
+ let event = get_next_event!(&mut pool_middleware);
+ assert_eq!(
+ event,
+ MiddlewarePoolEvent::TransactionStatus {
+ transaction: current_xt.clone(),
+ status: TxStatusTypeTest::Ready,
+ }
+ );
+ assert_eq!(1, pool.inner_pool.status().ready);
+
+ // The future tx has priority 2, smaller than the current 10.
+ api.set_priority(&future_uxt, 2);
+ let future_operation_id: String = tx_api
+ .call("transaction_unstable_broadcast", rpc_params![&future_xt])
+ .await
+ .unwrap();
+ assert_ne!(current_operation_id, future_operation_id);
+
+ let block_2_header = api.push_block(2, vec![], true);
+ let event =
+ ChainEvent::Finalized { hash: block_2_header.hash(), tree_route: Arc::from(vec![]) };
+ pool.inner_pool.maintain(event).await;
+ client_mock.trigger_import_stream(block_2_header).await;
+
+ // We must have at most 1 transaction in the pool, as per limits above.
+ assert_eq!(1, pool.inner_pool.status().ready);
+
+ let event = get_next_event!(&mut pool_middleware);
+ assert_eq!(
+ event,
+ MiddlewarePoolEvent::PoolError {
+ transaction: future_xt.clone(),
+ err: "Transaction couldn't enter the pool because of the limit".into()
+ }
+ );
+
+ let block_3_header = api.push_block(3, vec![current_uxt], true);
+ let event =
+ ChainEvent::Finalized { hash: block_3_header.hash(), tree_route: Arc::from(vec![]) };
+ pool.inner_pool.maintain(event).await;
+ client_mock.trigger_import_stream(block_3_header.clone()).await;
+
+ // The first tx is in a finalized block; the future tx must enter the pool.
+ let events = get_next_tx_events!(&mut pool_middleware, 3);
+ assert_eq!(
+ events.get(&current_xt).unwrap(),
+ &vec![
+ TxStatusTypeTest::InBlock((block_3_header.hash(), 0)),
+ TxStatusTypeTest::Finalized((block_3_header.hash(), 0))
+ ]
+ );
+ // The dropped transaction was resubmitted.
+ assert_eq!(events.get(&future_xt).unwrap(), &vec![TxStatusTypeTest::Ready]);
+}
diff --git a/substrate/client/rpc/src/state/tests.rs b/substrate/client/rpc/src/state/tests.rs
index 96f4c1be960faa5f15508f19a9dd5665d8b684c1..dd866e671c5095dca9b5851111340a4e32f71e67 100644
--- a/substrate/client/rpc/src/state/tests.rs
+++ b/substrate/client/rpc/src/state/tests.rs
@@ -475,7 +475,7 @@ async fn should_return_runtime_version() {
 // it is basically json-encoded substrate_test_runtime_client::runtime::VERSION
 let result = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\
- \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",4],\
+ \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",5],\
 [\"0x37e397fc7c91f5e4\",2],[\"0xd2bc9897eed08f15\",3],[\"0x40fe3ad401f8959a\",6],\
 [\"0xbc9d89904f5b923f\",1],[\"0xc6e9a76309f39b09\",2],[\"0xdd718d5cc53262d4\",1],\
 [\"0xcbca25e39f142387\",2],[\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],\
diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml
index 73edceb2ef3608c2c6da65806c1caf665bcfddd9..bbf67d1fbd0af6dceada3b73178004558044d0b2 100644
--- a/substrate/client/service/Cargo.toml
+++ b/substrate/client/service/Cargo.toml
@@ -84,6 +84,7 @@ tokio = { version = "1.22.0", features = ["parking_lot", "rt-multi-thread", "tim
 tempfile = "3.1.0"
 directories = "5.0.1"
 static_init = "1.0.3"
+schnellru = "0.2.1"
 [dev-dependencies]
 substrate-test-runtime-client = { path = "../../test-utils/runtime/client" }
diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs
index 108c258595a7325a5d85bb8024af13565a9062ff..35e8b53a09cf2d802a0c4a873f7ecb8099764e83 100644
--- a/substrate/client/service/src/client/client.rs
+++ b/substrate/client/service/src/client/client.rs
@@ -19,8 +19,8 @@ //! 
Substrate Client use super::block_rules::{BlockRules, LookupResult as BlockLookupResult}; -use futures::{FutureExt, StreamExt}; -use log::{error, info, trace, warn}; +use crate::client::notification_pinning::NotificationPinningWorker; +use log::{debug, info, trace, warn}; use parking_lot::{Mutex, RwLock}; use prometheus_endpoint::Registry; use rand::Rng; @@ -38,7 +38,7 @@ use sc_client_api::{ execution_extensions::ExecutionExtensions, notifications::{StorageEventStream, StorageNotifications}, CallExecutor, ExecutorProvider, KeysIter, OnFinalityAction, OnImportAction, PairsIter, - ProofProvider, UsageProvider, + ProofProvider, UnpinWorkerMessage, UsageProvider, }; use sc_consensus::{ BlockCheckParams, BlockImportParams, ForkChoiceStrategy, ImportResult, StateAction, @@ -114,7 +114,7 @@ where block_rules: BlockRules, config: ClientConfig, telemetry: Option, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, _phantom: PhantomData, } @@ -326,19 +326,35 @@ where // dropped, the block will be unpinned automatically. if let Some(ref notification) = finality_notification { if let Err(err) = self.backend.pin_block(notification.hash) { - error!( + debug!( "Unable to pin block for finality notification. hash: {}, Error: {}", notification.hash, err ); - }; + } else { + let _ = self + .unpin_worker_sender + .unbounded_send(UnpinWorkerMessage::AnnouncePin(notification.hash)) + .map_err(|e| { + log::error!( + "Unable to send AnnouncePin worker message for finality: {e}" + ) + }); + } } if let Some(ref notification) = import_notification { if let Err(err) = self.backend.pin_block(notification.hash) { - error!( + debug!( "Unable to pin block for import notification. hash: {}, Error: {}", notification.hash, err ); + } else { + let _ = self + .unpin_worker_sender + .unbounded_send(UnpinWorkerMessage::AnnouncePin(notification.hash)) + .map_err(|e| { + log::error!("Unable to send AnnouncePin worker message for import: {e}") + }); }; } @@ -416,25 +432,12 @@ where backend.commit_operation(op)?; } - let (unpin_worker_sender, mut rx) = - tracing_unbounded::("unpin-worker-channel", 10_000); - let task_backend = Arc::downgrade(&backend); - spawn_handle.spawn( - "unpin-worker", - None, - async move { - while let Some(message) = rx.next().await { - if let Some(backend) = task_backend.upgrade() { - backend.unpin_block(message); - } else { - log::debug!("Terminating unpin-worker, backend reference was dropped."); - return - } - } - log::debug!("Terminating unpin-worker, stream terminated.") - } - .boxed(), + let (unpin_worker_sender, rx) = tracing_unbounded::>( + "notification-pinning-worker-channel", + 10_000, ); + let unpin_worker = NotificationPinningWorker::new(rx, backend.clone()); + spawn_handle.spawn("notification-pinning-worker", None, Box::pin(unpin_worker.run())); Ok(Client { backend, diff --git a/substrate/client/service/src/client/mod.rs b/substrate/client/service/src/client/mod.rs index a13fd4317e1553d379ea068c516e74072d3c8c95..0703cc2b47d144d4e67418cfb9966cd1cd209392 100644 --- a/substrate/client/service/src/client/mod.rs +++ b/substrate/client/service/src/client/mod.rs @@ -47,6 +47,7 @@ mod block_rules; mod call_executor; mod client; +mod notification_pinning; mod wasm_override; mod wasm_substitutes; diff --git a/substrate/client/service/src/client/notification_pinning.rs b/substrate/client/service/src/client/notification_pinning.rs new file mode 100644 index 0000000000000000000000000000000000000000..80de91c02f1ae0273d42edc79d442aa312d09356 --- /dev/null +++ 
b/substrate/client/service/src/client/notification_pinning.rs @@ -0,0 +1,353 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Notification pinning related logic. +//! +//! This file contains a worker that should be started when a new client instance is created. +//! The goal is to avoid pruning of blocks that have active notifications in the node. Every +//! recipient of notifications should receive the chance to act upon them. In addition, notification +//! listeners can hold onto a [`sc_client_api::UnpinHandle`] to keep a block pinned. Once the handle +//! is dropped, a message is sent and the worker unpins the respective block. +use std::{ + marker::PhantomData, + sync::{Arc, Weak}, +}; + +use futures::StreamExt; +use sc_client_api::{Backend, UnpinWorkerMessage}; + +use sc_utils::mpsc::TracingUnboundedReceiver; +use schnellru::Limiter; +use sp_runtime::traits::Block as BlockT; + +const LOG_TARGET: &str = "db::notification_pinning"; +const NOTIFICATION_PINNING_LIMIT: usize = 1024; + +/// A limiter which automatically unpins blocks that leave the data structure. +#[derive(Clone, Debug)] +struct UnpinningByLengthLimiter> { + max_length: usize, + backend: Weak, + _phantom: PhantomData, +} + +impl> UnpinningByLengthLimiter { + /// Creates a new length limiter with a given `max_length`. + pub fn new(max_length: usize, backend: Weak) -> UnpinningByLengthLimiter { + UnpinningByLengthLimiter { max_length, backend, _phantom: PhantomData::::default() } + } +} + +impl> Limiter + for UnpinningByLengthLimiter +{ + type KeyToInsert<'a> = Block::Hash; + type LinkType = usize; + + fn is_over_the_limit(&self, length: usize) -> bool { + length > self.max_length + } + + fn on_insert( + &mut self, + _length: usize, + key: Self::KeyToInsert<'_>, + value: u32, + ) -> Option<(Block::Hash, u32)> { + log::debug!(target: LOG_TARGET, "Pinning block based on notification. hash = {key}"); + if self.max_length > 0 { + Some((key, value)) + } else { + None + } + } + + fn on_replace( + &mut self, + _length: usize, + _old_key: &mut Block::Hash, + _new_key: Block::Hash, + _old_value: &mut u32, + _new_value: &mut u32, + ) -> bool { + true + } + + fn on_removed(&mut self, key: &mut Block::Hash, references: &mut u32) { + // If reference count was larger than 0 on removal, + // the item was removed due to capacity limitations. + // Since the cache should be large enough for pinned items, + // we want to know about these evictions. + if *references > 0 { + log::warn!( + target: LOG_TARGET, + "Notification block pinning limit reached. Unpinning block with hash = {key:?}" + ); + if let Some(backend) = self.backend.upgrade() { + (0..*references).for_each(|_| backend.unpin_block(*key)); + } + } else { + log::trace!( + target: LOG_TARGET, + "Unpinned block. 
hash = {key:?}", + ) + } + } + + fn on_cleared(&mut self) {} + + fn on_grow(&mut self, _new_memory_usage: usize) -> bool { + true + } +} + +/// Worker for the handling of notification pinning. +/// +/// It receives messages from a receiver and pins/unpins based on the incoming messages. +/// All notification related unpinning should go through this worker. If the maximum number of +/// notification pins is reached, the block from the oldest notification is unpinned. +pub struct NotificationPinningWorker> { + unpin_message_rx: TracingUnboundedReceiver>, + task_backend: Weak, + pinned_blocks: schnellru::LruMap>, +} + +impl> NotificationPinningWorker { + /// Creates a new `NotificationPinningWorker`. + pub fn new( + unpin_message_rx: TracingUnboundedReceiver>, + task_backend: Arc, + ) -> Self { + let pinned_blocks = + schnellru::LruMap::>::new( + UnpinningByLengthLimiter::new( + NOTIFICATION_PINNING_LIMIT, + Arc::downgrade(&task_backend), + ), + ); + Self { unpin_message_rx, task_backend: Arc::downgrade(&task_backend), pinned_blocks } + } + + fn handle_announce_message(&mut self, hash: Block::Hash) { + if let Some(entry) = self.pinned_blocks.get_or_insert(hash, Default::default) { + *entry = *entry + 1; + } + } + + fn handle_unpin_message(&mut self, hash: Block::Hash) -> Result<(), ()> { + if let Some(refcount) = self.pinned_blocks.peek_mut(&hash) { + *refcount = *refcount - 1; + if *refcount == 0 { + self.pinned_blocks.remove(&hash); + } + if let Some(backend) = self.task_backend.upgrade() { + log::debug!(target: LOG_TARGET, "Reducing pinning refcount for block hash = {hash:?}"); + backend.unpin_block(hash); + } else { + log::debug!(target: LOG_TARGET, "Terminating unpin-worker, backend reference was dropped."); + return Err(()) + } + } else { + log::debug!(target: LOG_TARGET, "Received unpin message for already unpinned block. hash = {hash:?}"); + } + Ok(()) + } + + /// Start working on the received messages. + /// + /// The worker maintains a map which keeps track of the pinned blocks and their reference count. + /// Depending upon the received message, it acts to pin/unpin the block. 
+ pub async fn run(mut self) { + while let Some(message) = self.unpin_message_rx.next().await { + match message { + UnpinWorkerMessage::AnnouncePin(hash) => self.handle_announce_message(hash), + UnpinWorkerMessage::Unpin(hash) => + if self.handle_unpin_message(hash).is_err() { + return + }, + } + } + log::debug!(target: LOG_TARGET, "Terminating unpin-worker, stream terminated.") + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use sc_client_api::{Backend, UnpinWorkerMessage}; + use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; + use sp_core::H256; + use sp_runtime::traits::Block as BlockT; + + type Block = substrate_test_runtime_client::runtime::Block; + + use super::{NotificationPinningWorker, UnpinningByLengthLimiter}; + + impl> NotificationPinningWorker { + fn new_with_limit( + unpin_message_rx: TracingUnboundedReceiver>, + task_backend: Arc, + limit: usize, + ) -> Self { + let pinned_blocks = + schnellru::LruMap::>::new( + UnpinningByLengthLimiter::new(limit, Arc::downgrade(&task_backend)), + ); + Self { unpin_message_rx, task_backend: Arc::downgrade(&task_backend), pinned_blocks } + } + + fn lru( + &self, + ) -> &schnellru::LruMap> { + &self.pinned_blocks + } + } + + #[test] + fn pinning_worker_handles_base_case() { + let (_tx, rx) = tracing_unbounded("testing", 1000); + + let backend = Arc::new(sc_client_api::in_mem::Backend::::new()); + + let hash = H256::random(); + + let mut worker = NotificationPinningWorker::new(rx, backend.clone()); + + // Block got pinned and unpin message should unpin in the backend. + let _ = backend.pin_block(hash); + assert_eq!(backend.pin_refs(&hash), Some(1)); + + worker.handle_announce_message(hash); + assert_eq!(worker.lru().len(), 1); + + let _ = worker.handle_unpin_message(hash); + + assert_eq!(backend.pin_refs(&hash), Some(0)); + assert!(worker.lru().is_empty()); + } + + #[test] + fn pinning_worker_handles_multiple_pins() { + let (_tx, rx) = tracing_unbounded("testing", 1000); + + let backend = Arc::new(sc_client_api::in_mem::Backend::::new()); + + let hash = H256::random(); + + let mut worker = NotificationPinningWorker::new(rx, backend.clone()); + // Block got pinned multiple times. + let _ = backend.pin_block(hash); + let _ = backend.pin_block(hash); + let _ = backend.pin_block(hash); + assert_eq!(backend.pin_refs(&hash), Some(3)); + + worker.handle_announce_message(hash); + worker.handle_announce_message(hash); + worker.handle_announce_message(hash); + assert_eq!(worker.lru().len(), 1); + + let _ = worker.handle_unpin_message(hash); + assert_eq!(backend.pin_refs(&hash), Some(2)); + let _ = worker.handle_unpin_message(hash); + assert_eq!(backend.pin_refs(&hash), Some(1)); + let _ = worker.handle_unpin_message(hash); + assert_eq!(backend.pin_refs(&hash), Some(0)); + assert!(worker.lru().is_empty()); + + let _ = worker.handle_unpin_message(hash); + assert_eq!(backend.pin_refs(&hash), Some(0)); + } + + #[test] + fn pinning_worker_handles_too_many_unpins() { + let (_tx, rx) = tracing_unbounded("testing", 1000); + + let backend = Arc::new(sc_client_api::in_mem::Backend::::new()); + + let hash = H256::random(); + let hash2 = H256::random(); + + let mut worker = NotificationPinningWorker::new(rx, backend.clone()); + // Block was announced once but unpinned multiple times. The worker should ignore the + // additional unpins. 
+ let _ = backend.pin_block(hash); + let _ = backend.pin_block(hash); + let _ = backend.pin_block(hash); + assert_eq!(backend.pin_refs(&hash), Some(3)); + + worker.handle_announce_message(hash); + assert_eq!(worker.lru().len(), 1); + + let _ = worker.handle_unpin_message(hash); + assert_eq!(backend.pin_refs(&hash), Some(2)); + let _ = worker.handle_unpin_message(hash); + assert_eq!(backend.pin_refs(&hash), Some(2)); + assert!(worker.lru().is_empty()); + + let _ = worker.handle_unpin_message(hash2); + assert!(worker.lru().is_empty()); + assert_eq!(backend.pin_refs(&hash2), None); + } + + #[test] + fn pinning_worker_should_evict_when_limit_reached() { + let (_tx, rx) = tracing_unbounded("testing", 1000); + + let backend = Arc::new(sc_client_api::in_mem::Backend::::new()); + + let hash1 = H256::random(); + let hash2 = H256::random(); + let hash3 = H256::random(); + let hash4 = H256::random(); + + // Only two items fit into the cache. + let mut worker = NotificationPinningWorker::new_with_limit(rx, backend.clone(), 2); + + // Multiple blocks are announced but the cache size is too small. We expect that blocks + // are evicted by the cache and unpinned in the backend. + let _ = backend.pin_block(hash1); + let _ = backend.pin_block(hash2); + let _ = backend.pin_block(hash3); + assert_eq!(backend.pin_refs(&hash1), Some(1)); + assert_eq!(backend.pin_refs(&hash2), Some(1)); + assert_eq!(backend.pin_refs(&hash3), Some(1)); + + worker.handle_announce_message(hash1); + assert!(worker.lru().peek(&hash1).is_some()); + worker.handle_announce_message(hash2); + assert!(worker.lru().peek(&hash2).is_some()); + worker.handle_announce_message(hash3); + assert!(worker.lru().peek(&hash3).is_some()); + assert!(worker.lru().peek(&hash2).is_some()); + assert_eq!(worker.lru().len(), 2); + + // Hash 1 should have gotten unpinned, since its oldest. + assert_eq!(backend.pin_refs(&hash1), Some(0)); + assert_eq!(backend.pin_refs(&hash2), Some(1)); + assert_eq!(backend.pin_refs(&hash3), Some(1)); + + // Hash 2 is getting bumped. + worker.handle_announce_message(hash2); + assert_eq!(worker.lru().peek(&hash2), Some(&2)); + + // Since hash 2 was accessed, evict hash 3. + worker.handle_announce_message(hash4); + assert_eq!(worker.lru().peek(&hash3), None); + } +} diff --git a/substrate/client/transaction-pool/src/lib.rs b/substrate/client/transaction-pool/src/lib.rs index faa3f455a580c8713ced21b14818da874fd972a7..730cfe367122b4a92c5d776743deba5136d6fa9e 100644 --- a/substrate/client/transaction-pool/src/lib.rs +++ b/substrate/client/transaction-pool/src/lib.rs @@ -164,8 +164,9 @@ where pool_api: Arc, best_block_hash: Block::Hash, finalized_hash: Block::Hash, + options: graph::Options, ) -> (Self, Pin + Send>>) { - let pool = Arc::new(graph::Pool::new(Default::default(), true.into(), pool_api.clone())); + let pool = Arc::new(graph::Pool::new(options, true.into(), pool_api.clone())); let (revalidation_queue, background_task) = revalidation::RevalidationQueue::new_background( pool_api.clone(), pool.clone(), @@ -658,8 +659,13 @@ where }) .unwrap_or_default() .into_iter() - .filter(|tx| tx.is_signed().unwrap_or(true)); - + // TODO [#2415]: This isn't really what we mean - we really want a + // `tx.is_transaction`, since bare transactions may be gossipped as in the case + // of Frontier txs or claims. This will be sorted once we dispense with the + // concept of bare transactions and make inherents the only possible type of + // extrinsics which are bare. At this point we can change this to + // `tx.is_transaction()`. 
+ .filter(|tx| !tx.is_bare()); let mut resubmitted_to_report = 0; resubmit_transactions.extend(block_transactions.into_iter().filter(|tx| { diff --git a/substrate/client/transaction-pool/tests/pool.rs b/substrate/client/transaction-pool/tests/pool.rs index 6b1a197440c11e69805465ed4c7195f07eaafc0f..461b9860d414a3495dc34bf4f149963fa4d0a903 100644 --- a/substrate/client/transaction-pool/tests/pool.rs +++ b/substrate/client/transaction-pool/tests/pool.rs @@ -73,7 +73,7 @@ fn create_basic_pool_with_genesis( .map(|blocks| blocks[0].0.header.hash()) .expect("there is block 0. qed") }; - BasicPool::new_test(test_api, genesis_hash, genesis_hash) + BasicPool::new_test(test_api, genesis_hash, genesis_hash, Default::default()) } fn create_basic_pool(test_api: TestApi) -> BasicPool { @@ -994,6 +994,7 @@ fn import_notification_to_pool_maintain_works() { )), best_hash, finalized_hash, + Default::default(), ) .0, ); diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml index 3f148bf4c83bfcbcf1892bdb0ebb83abb70b9cce..6746723e72f79498cf764199866936b93373858e 100644 --- a/substrate/frame/Cargo.toml +++ b/substrate/frame/Cargo.toml @@ -19,8 +19,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # external deps -parity-scale-codec = { version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.6.0", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.2.2", default-features = false, features = [ + "derive", +] } +scale-info = { version = "2.6.0", default-features = false, features = [ + "derive", +] } # primitive deps, used for developing FRAME pallets. sp-runtime = { default-features = false, path = "../primitives/runtime" } @@ -57,8 +61,6 @@ pallet-examples = { path = "./examples" } default = ["runtime", "std"] experimental = ["frame-support/experimental"] runtime = [ - "frame-executive", - "frame-system-rpc-runtime-api", "sp-api", "sp-block-builder", "sp-consensus-aura", @@ -68,6 +70,9 @@ runtime = [ "sp-session", "sp-transaction-pool", "sp-version", + + "frame-executive", + "frame-system-rpc-runtime-api", ] std = [ "frame-executive?/std", diff --git a/substrate/frame/alliance/src/migration.rs b/substrate/frame/alliance/src/migration.rs index e3a44a7887e976cfa6c5d0cfd48c251348edb26b..432f09a16f47772ead741a0f312eefeb795e2d69 100644 --- a/substrate/frame/alliance/src/migration.rs +++ b/substrate/frame/alliance/src/migration.rs @@ -19,19 +19,19 @@ use crate::{Config, Pallet, Weight, LOG_TARGET}; use frame_support::{pallet_prelude::*, storage::migration, traits::OnRuntimeUpgrade}; use log; -/// The current storage version. +/// The in-code storage version. pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); /// Wrapper for all migrations of this pallet. 
pub fn migrate, I: 'static>() -> Weight { - let onchain_version = Pallet::::on_chain_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); let mut weight: Weight = Weight::zero(); - if onchain_version < 1 { + if on_chain_version < 1 { weight = weight.saturating_add(v0_to_v1::migrate::()); } - if onchain_version < 2 { + if on_chain_version < 2 { weight = weight.saturating_add(v1_to_v2::migrate::()); } diff --git a/substrate/frame/alliance/src/mock.rs b/substrate/frame/alliance/src/mock.rs index 4a65485ed8f6643e2ab38e49150eb3901ded7559..fd44d33ef93ce3a212d67847f021f2f91077d461 100644 --- a/substrate/frame/alliance/src/mock.rs +++ b/substrate/frame/alliance/src/mock.rs @@ -208,7 +208,7 @@ impl ProposalProvider for AllianceProposalProvider } fn proposal_of(proposal_hash: H256) -> Option { - AllianceMotion::proposal_of(proposal_hash) + pallet_collective::ProposalOf::::get(proposal_hash) } } diff --git a/substrate/frame/alliance/src/tests.rs b/substrate/frame/alliance/src/tests.rs index 8011627b237af15e8d30379bf14afc2564421050..710de5a54bc95a607b51f4078db94d7c24c92272 100644 --- a/substrate/frame/alliance/src/tests.rs +++ b/substrate/frame/alliance/src/tests.rs @@ -187,8 +187,8 @@ fn propose_works() { Box::new(proposal.clone()), proposal_len )); - assert_eq!(*AllianceMotion::proposals(), vec![hash]); - assert_eq!(AllianceMotion::proposal_of(&hash), Some(proposal)); + assert_eq!(*pallet_collective::Proposals::::get(), vec![hash]); + assert_eq!(pallet_collective::ProposalOf::::get(&hash), Some(proposal)); assert_eq!( System::events(), vec![EventRecord { diff --git a/substrate/frame/alliance/src/weights.rs b/substrate/frame/alliance/src/weights.rs index b5bb50957207f6c75cbca0479d97127ffcca572a..0b2d1fca43ca68d3691ee533cf451cc560108390 100644 --- a/substrate/frame/alliance/src/weights.rs +++ b/substrate/frame/alliance/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_alliance +//! Autogenerated weights for `pallet_alliance` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/alliance/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/alliance/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_alliance. +/// Weight functions needed for `pallet_alliance`. pub trait WeightInfo { fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight; fn vote(m: u32, ) -> Weight; @@ -74,205 +73,209 @@ pub trait WeightInfo { fn abdicate_fellow_status() -> Weight; } -/// Weights for pallet_alliance using the Substrate node and recommended hardware. +/// Weights for `pallet_alliance` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Alliance Members (r:1 w:0) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion ProposalOf (r:1 w:1) - /// Proof Skipped: AllianceMotion ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: AllianceMotion Proposals (r:1 w:1) - /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion ProposalCount (r:1 w:1) - /// Proof Skipped: AllianceMotion ProposalCount (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Voting (r:0 w:1) - /// Proof Skipped: AllianceMotion Voting (max_values: None, max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalCount` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Voting` (r:0 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `b` is `[1, 1024]`. /// The range of component `m` is `[2, 100]`. /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `653 + m * (32 ±0) + p * (35 ±0)` + // Measured: `654 + m * (32 ±0) + p * (36 ±0)` // Estimated: `6676 + m * (32 ±0) + p * (36 ±0)` - // Minimum execution time: 36_908_000 picoseconds. 
- Weight::from_parts(39_040_304, 6676) - // Standard Error: 131 - .saturating_add(Weight::from_parts(781, 0).saturating_mul(b.into())) - // Standard Error: 1_375 - .saturating_add(Weight::from_parts(48_745, 0).saturating_mul(m.into())) - // Standard Error: 1_358 - .saturating_add(Weight::from_parts(148_047, 0).saturating_mul(p.into())) + // Minimum execution time: 30_801_000 picoseconds. + Weight::from_parts(32_942_969, 6676) + // Standard Error: 112 + .saturating_add(Weight::from_parts(614, 0).saturating_mul(b.into())) + // Standard Error: 1_177 + .saturating_add(Weight::from_parts(45_758, 0).saturating_mul(m.into())) + // Standard Error: 1_162 + .saturating_add(Weight::from_parts(136_600, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } - /// Storage: Alliance Members (r:1 w:0) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Voting (r:1 w:1) - /// Proof Skipped: AllianceMotion Voting (max_values: None, max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `m` is `[5, 100]`. fn vote(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1042 + m * (64 ±0)` + // Measured: `1113 + m * (64 ±0)` // Estimated: `6676 + m * (64 ±0)` - // Minimum execution time: 30_166_000 picoseconds. - Weight::from_parts(32_798_454, 6676) - // Standard Error: 1_432 - .saturating_add(Weight::from_parts(83_001, 0).saturating_mul(m.into())) + // Minimum execution time: 29_705_000 picoseconds. 
+ Weight::from_parts(30_274_070, 6676) + // Standard Error: 884 + .saturating_add(Weight::from_parts(71_178, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: Alliance Members (r:1 w:0) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Voting (r:1 w:1) - /// Proof Skipped: AllianceMotion Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: AllianceMotion Members (r:1 w:0) - /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Proposals (r:1 w:1) - /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion ProposalOf (r:0 w:1) - /// Proof Skipped: AllianceMotion ProposalOf (max_values: None, max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `576 + m * (96 ±0) + p * (36 ±0)` + // Measured: `640 + m * (96 ±0) + p * (36 ±0)` // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` - // Minimum execution time: 45_173_000 picoseconds. - Weight::from_parts(42_192_020, 6676) - // Standard Error: 1_456 - .saturating_add(Weight::from_parts(66_751, 0).saturating_mul(m.into())) - // Standard Error: 1_420 - .saturating_add(Weight::from_parts(158_161, 0).saturating_mul(p.into())) + // Minimum execution time: 38_596_000 picoseconds. 
+ Weight::from_parts(36_445_536, 6676) + // Standard Error: 1_217 + .saturating_add(Weight::from_parts(69_976, 0).saturating_mul(m.into())) + // Standard Error: 1_187 + .saturating_add(Weight::from_parts(149_706, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } - /// Storage: Alliance Members (r:1 w:0) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Voting (r:1 w:1) - /// Proof Skipped: AllianceMotion Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: AllianceMotion Members (r:1 w:0) - /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion ProposalOf (r:1 w:1) - /// Proof Skipped: AllianceMotion ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: AllianceMotion Proposals (r:1 w:1) - /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `b` is `[1, 1024]`. /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1087 + m * (96 ±0) + p * (39 ±0)` + // Measured: `1220 + m * (96 ±0) + p * (39 ±0)` // Estimated: `6676 + m * (97 ±0) + p * (40 ±0)` - // Minimum execution time: 58_290_000 picoseconds. - Weight::from_parts(54_924_919, 6676) - // Standard Error: 157 - .saturating_add(Weight::from_parts(464, 0).saturating_mul(b.into())) - // Standard Error: 1_665 - .saturating_add(Weight::from_parts(73_183, 0).saturating_mul(m.into())) - // Standard Error: 1_623 - .saturating_add(Weight::from_parts(168_318, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(5_u64)) + // Minimum execution time: 57_602_000 picoseconds. 
+ Weight::from_parts(55_147_214, 6676) + // Standard Error: 127 + .saturating_add(Weight::from_parts(1_650, 0).saturating_mul(b.into())) + // Standard Error: 1_346 + .saturating_add(Weight::from_parts(56_056, 0).saturating_mul(m.into())) + // Standard Error: 1_312 + .saturating_add(Weight::from_parts(168_247, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) } - /// Storage: Alliance Members (r:1 w:0) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Voting (r:1 w:1) - /// Proof Skipped: AllianceMotion Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: AllianceMotion Members (r:1 w:0) - /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Prime (r:1 w:0) - /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Proposals (r:1 w:1) - /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion ProposalOf (r:0 w:1) - /// Proof Skipped: AllianceMotion ProposalOf (max_values: None, max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:1 w:0) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `m` is `[2, 100]`. /// The range of component `p` is `[1, 100]`. fn close_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `577 + m * (96 ±0) + p * (36 ±0)` + // Measured: `641 + m * (96 ±0) + p * (36 ±0)` // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` - // Minimum execution time: 46_794_000 picoseconds. - Weight::from_parts(43_092_958, 6676) - // Standard Error: 1_273 - .saturating_add(Weight::from_parts(71_054, 0).saturating_mul(m.into())) - // Standard Error: 1_257 - .saturating_add(Weight::from_parts(152_820, 0).saturating_mul(p.into())) + // Minimum execution time: 40_755_000 picoseconds. 
+ Weight::from_parts(36_953_935, 6676) + // Standard Error: 1_177 + .saturating_add(Weight::from_parts(73_240, 0).saturating_mul(m.into())) + // Standard Error: 1_162 + .saturating_add(Weight::from_parts(149_412, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } - /// Storage: Alliance Members (r:1 w:0) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Voting (r:1 w:1) - /// Proof Skipped: AllianceMotion Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: AllianceMotion Members (r:1 w:0) - /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Prime (r:1 w:0) - /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Proposals (r:1 w:1) - /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion ProposalOf (r:0 w:1) - /// Proof Skipped: AllianceMotion ProposalOf (max_values: None, max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:1 w:0) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `b` is `[1, 1024]`. /// The range of component `m` is `[5, 100]`. /// The range of component `p` is `[1, 100]`. fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `684 + m * (96 ±0) + p * (35 ±0)` + // Measured: `694 + m * (96 ±0) + p * (35 ±0)` // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` - // Minimum execution time: 47_338_000 picoseconds. - Weight::from_parts(41_257_479, 6676) - // Standard Error: 119 - .saturating_add(Weight::from_parts(1_019, 0).saturating_mul(b.into())) - // Standard Error: 1_277 - .saturating_add(Weight::from_parts(78_453, 0).saturating_mul(m.into())) - // Standard Error: 1_231 - .saturating_add(Weight::from_parts(150_991, 0).saturating_mul(p.into())) + // Minimum execution time: 41_113_000 picoseconds. 
+ Weight::from_parts(36_610_116, 6676) + // Standard Error: 92 + .saturating_add(Weight::from_parts(1_157, 0).saturating_mul(b.into())) + // Standard Error: 984 + .saturating_add(Weight::from_parts(63_050, 0).saturating_mul(m.into())) + // Standard Error: 949 + .saturating_add(Weight::from_parts(150_420, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } - /// Storage: Alliance Members (r:2 w:2) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Members (r:1 w:1) - /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:2 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Members` (r:1 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[1, 100]`. /// The range of component `z` is `[0, 100]`. fn init_members(m: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `217` + // Measured: `250` // Estimated: `12362` - // Minimum execution time: 35_012_000 picoseconds. - Weight::from_parts(24_288_079, 12362) - // Standard Error: 878 - .saturating_add(Weight::from_parts(153_615, 0).saturating_mul(m.into())) - // Standard Error: 867 - .saturating_add(Weight::from_parts(129_307, 0).saturating_mul(z.into())) + // Minimum execution time: 30_249_000 picoseconds. + Weight::from_parts(21_364_868, 12362) + // Standard Error: 887 + .saturating_add(Weight::from_parts(131_624, 0).saturating_mul(m.into())) + // Standard Error: 877 + .saturating_add(Weight::from_parts(105_379, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Alliance Members (r:2 w:2) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Proposals (r:1 w:0) - /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Alliance DepositOf (r:200 w:50) - /// Proof: Alliance DepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) - /// Storage: System Account (r:50 w:50) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: AllianceMotion Members (r:0 w:1) - /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Prime (r:0 w:1) - /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:2 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Alliance::DepositOf` (r:200 w:50) + /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:50 w:50) + /// Proof: `System::Account` (`max_values`: None, 
`max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Members` (r:0 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `x` is `[1, 100]`. /// The range of component `y` is `[0, 100]`. /// The range of component `z` is `[0, 50]`. @@ -280,14 +283,14 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + x * (50 ±0) + y * (51 ±0) + z * (251 ±0)` // Estimated: `12362 + x * (2539 ±0) + y * (2539 ±0) + z * (2603 ±1)` - // Minimum execution time: 309_235_000 picoseconds. - Weight::from_parts(311_279_000, 12362) - // Standard Error: 26_510 - .saturating_add(Weight::from_parts(543_475, 0).saturating_mul(x.into())) - // Standard Error: 26_382 - .saturating_add(Weight::from_parts(603_169, 0).saturating_mul(y.into())) - // Standard Error: 52_716 - .saturating_add(Weight::from_parts(16_264_836, 0).saturating_mul(z.into())) + // Minimum execution time: 307_414_000 picoseconds. + Weight::from_parts(309_960_000, 12362) + // Standard Error: 29_278 + .saturating_add(Weight::from_parts(588_774, 0).saturating_mul(x.into())) + // Standard Error: 29_137 + .saturating_add(Weight::from_parts(563_245, 0).saturating_mul(y.into())) + // Standard Error: 58_221 + .saturating_add(Weight::from_parts(13_947_604, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(x.into()))) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(y.into()))) @@ -298,397 +301,401 @@ impl WeightInfo for SubstrateWeight { .saturating_add(Weight::from_parts(0, 2539).saturating_mul(y.into())) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(z.into())) } - /// Storage: Alliance Rule (r:0 w:1) - /// Proof: Alliance Rule (max_values: Some(1), max_size: Some(87), added: 582, mode: MaxEncodedLen) + /// Storage: `Alliance::Rule` (r:0 w:1) + /// Proof: `Alliance::Rule` (`max_values`: Some(1), `max_size`: Some(87), added: 582, mode: `MaxEncodedLen`) fn set_rule() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_833_000 picoseconds. - Weight::from_parts(9_313_000, 0) + // Minimum execution time: 6_156_000 picoseconds. + Weight::from_parts(6_560_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Alliance Announcements (r:1 w:1) - /// Proof: Alliance Announcements (max_values: Some(1), max_size: Some(8702), added: 9197, mode: MaxEncodedLen) + /// Storage: `Alliance::Announcements` (r:1 w:1) + /// Proof: `Alliance::Announcements` (`max_values`: Some(1), `max_size`: Some(8702), added: 9197, mode: `MaxEncodedLen`) fn announce() -> Weight { // Proof Size summary in bytes: - // Measured: `246` + // Measured: `279` // Estimated: `10187` - // Minimum execution time: 12_231_000 picoseconds. - Weight::from_parts(12_761_000, 10187) + // Minimum execution time: 8_988_000 picoseconds. 
+ Weight::from_parts(9_476_000, 10187) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Alliance Announcements (r:1 w:1) - /// Proof: Alliance Announcements (max_values: Some(1), max_size: Some(8702), added: 9197, mode: MaxEncodedLen) + /// Storage: `Alliance::Announcements` (r:1 w:1) + /// Proof: `Alliance::Announcements` (`max_values`: Some(1), `max_size`: Some(8702), added: 9197, mode: `MaxEncodedLen`) fn remove_announcement() -> Weight { // Proof Size summary in bytes: - // Measured: `319` + // Measured: `352` // Estimated: `10187` - // Minimum execution time: 13_079_000 picoseconds. - Weight::from_parts(13_612_000, 10187) + // Minimum execution time: 10_126_000 picoseconds. + Weight::from_parts(10_755_000, 10187) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Alliance Members (r:3 w:1) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: Alliance UnscrupulousAccounts (r:1 w:0) - /// Proof: Alliance UnscrupulousAccounts (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Alliance DepositOf (r:0 w:1) - /// Proof: Alliance DepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) + /// Storage: `Alliance::Members` (r:3 w:1) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:0) + /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Alliance::DepositOf` (r:0 w:1) + /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) fn join_alliance() -> Weight { // Proof Size summary in bytes: - // Measured: `468` + // Measured: `501` // Estimated: `18048` - // Minimum execution time: 44_574_000 picoseconds. - Weight::from_parts(46_157_000, 18048) + // Minimum execution time: 38_878_000 picoseconds. + Weight::from_parts(40_493_000, 18048) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Alliance Members (r:3 w:1) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: Alliance UnscrupulousAccounts (r:1 w:0) - /// Proof: Alliance UnscrupulousAccounts (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) + /// Storage: `Alliance::Members` (r:3 w:1) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:0) + /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) fn nominate_ally() -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `400` // Estimated: `18048` - // Minimum execution time: 26_114_000 picoseconds. - Weight::from_parts(27_069_000, 18048) + // Minimum execution time: 23_265_000 picoseconds. 
+ Weight::from_parts(24_703_000, 18048) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Alliance Members (r:2 w:2) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Proposals (r:1 w:0) - /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Members (r:0 w:1) - /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Prime (r:0 w:1) - /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:2 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:0 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn elevate_ally() -> Weight { // Proof Size summary in bytes: - // Measured: `443` + // Measured: `476` // Estimated: `12362` - // Minimum execution time: 25_882_000 picoseconds. - Weight::from_parts(26_923_000, 12362) + // Minimum execution time: 23_049_000 picoseconds. + Weight::from_parts(23_875_000, 12362) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Alliance Members (r:4 w:2) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Proposals (r:1 w:0) - /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Members (r:0 w:1) - /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Prime (r:0 w:1) - /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Alliance RetiringMembers (r:0 w:1) - /// Proof: Alliance RetiringMembers (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `Alliance::Members` (r:4 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:0 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Alliance::RetiringMembers` (r:0 w:1) + /// Proof: `Alliance::RetiringMembers` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn give_retirement_notice() -> Weight { // Proof Size summary in bytes: - // Measured: `443` + // Measured: `476` // Estimated: `23734` - // Minimum execution time: 34_112_000 picoseconds. 
- Weight::from_parts(35_499_000, 23734) + // Minimum execution time: 29_124_000 picoseconds. + Weight::from_parts(30_369_000, 23734) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Alliance RetiringMembers (r:1 w:1) - /// Proof: Alliance RetiringMembers (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: Alliance Members (r:1 w:1) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: Alliance DepositOf (r:1 w:1) - /// Proof: Alliance DepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Alliance::RetiringMembers` (r:1 w:1) + /// Proof: `Alliance::RetiringMembers` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Alliance::Members` (r:1 w:1) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `Alliance::DepositOf` (r:1 w:1) + /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn retire() -> Weight { // Proof Size summary in bytes: - // Measured: `687` + // Measured: `720` // Estimated: `6676` - // Minimum execution time: 41_239_000 picoseconds. - Weight::from_parts(42_764_000, 6676) + // Minimum execution time: 36_376_000 picoseconds. + Weight::from_parts(38_221_000, 6676) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Alliance Members (r:3 w:1) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Proposals (r:1 w:0) - /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Alliance DepositOf (r:1 w:1) - /// Proof: Alliance DepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: AllianceMotion Members (r:0 w:1) - /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Prime (r:0 w:1) - /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:3 w:1) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Alliance::DepositOf` (r:1 w:1) + /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Members` (r:0 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: 
`AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn kick_member() -> Weight { // Proof Size summary in bytes: - // Measured: `707` + // Measured: `740` // Estimated: `18048` - // Minimum execution time: 68_071_000 picoseconds. - Weight::from_parts(71_808_000, 18048) + // Minimum execution time: 56_560_000 picoseconds. + Weight::from_parts(58_621_000, 18048) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Alliance UnscrupulousAccounts (r:1 w:1) - /// Proof: Alliance UnscrupulousAccounts (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: Alliance UnscrupulousWebsites (r:1 w:1) - /// Proof: Alliance UnscrupulousWebsites (max_values: Some(1), max_size: Some(25702), added: 26197, mode: MaxEncodedLen) + /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:1) + /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `Alliance::UnscrupulousWebsites` (r:1 w:1) + /// Proof: `Alliance::UnscrupulousWebsites` (`max_values`: Some(1), `max_size`: Some(25702), added: 26197, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 100]`. /// The range of component `l` is `[0, 255]`. fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `246` + // Measured: `279` // Estimated: `27187` - // Minimum execution time: 7_006_000 picoseconds. - Weight::from_parts(7_253_000, 27187) - // Standard Error: 3_403 - .saturating_add(Weight::from_parts(1_680_082, 0).saturating_mul(n.into())) - // Standard Error: 1_333 - .saturating_add(Weight::from_parts(72_943, 0).saturating_mul(l.into())) + // Minimum execution time: 5_191_000 picoseconds. + Weight::from_parts(5_410_000, 27187) + // Standard Error: 3_215 + .saturating_add(Weight::from_parts(1_018_569, 0).saturating_mul(n.into())) + // Standard Error: 1_259 + .saturating_add(Weight::from_parts(68_712, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Alliance UnscrupulousAccounts (r:1 w:1) - /// Proof: Alliance UnscrupulousAccounts (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: Alliance UnscrupulousWebsites (r:1 w:1) - /// Proof: Alliance UnscrupulousWebsites (max_values: Some(1), max_size: Some(25702), added: 26197, mode: MaxEncodedLen) + /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:1) + /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `Alliance::UnscrupulousWebsites` (r:1 w:1) + /// Proof: `Alliance::UnscrupulousWebsites` (`max_values`: Some(1), `max_size`: Some(25702), added: 26197, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 100]`. /// The range of component `l` is `[0, 255]`. fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + l * (100 ±0) + n * (289 ±0)` // Estimated: `27187` - // Minimum execution time: 7_292_000 picoseconds. - Weight::from_parts(7_629_000, 27187) - // Standard Error: 176_225 - .saturating_add(Weight::from_parts(16_646_429, 0).saturating_mul(n.into())) - // Standard Error: 69_017 - .saturating_add(Weight::from_parts(310_978, 0).saturating_mul(l.into())) + // Minimum execution time: 5_361_000 picoseconds. 
+ Weight::from_parts(5_494_000, 27187) + // Standard Error: 181_133 + .saturating_add(Weight::from_parts(16_322_982, 0).saturating_mul(n.into())) + // Standard Error: 70_940 + .saturating_add(Weight::from_parts(343_581, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Alliance Members (r:3 w:2) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Proposals (r:1 w:0) - /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Members (r:0 w:1) - /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Prime (r:0 w:1) - /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:3 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:0 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn abdicate_fellow_status() -> Weight { // Proof Size summary in bytes: - // Measured: `443` + // Measured: `476` // Estimated: `18048` - // Minimum execution time: 31_798_000 picoseconds. - Weight::from_parts(33_463_000, 18048) + // Minimum execution time: 28_856_000 picoseconds. + Weight::from_parts(29_875_000, 18048) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: Alliance Members (r:1 w:0) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion ProposalOf (r:1 w:1) - /// Proof Skipped: AllianceMotion ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: AllianceMotion Proposals (r:1 w:1) - /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion ProposalCount (r:1 w:1) - /// Proof Skipped: AllianceMotion ProposalCount (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Voting (r:0 w:1) - /// Proof Skipped: AllianceMotion Voting (max_values: None, max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalCount` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Voting` (r:0 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `b` is `[1, 1024]`. /// The range of component `m` is `[2, 100]`. /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `653 + m * (32 ±0) + p * (35 ±0)` + // Measured: `654 + m * (32 ±0) + p * (36 ±0)` // Estimated: `6676 + m * (32 ±0) + p * (36 ±0)` - // Minimum execution time: 36_908_000 picoseconds. - Weight::from_parts(39_040_304, 6676) - // Standard Error: 131 - .saturating_add(Weight::from_parts(781, 0).saturating_mul(b.into())) - // Standard Error: 1_375 - .saturating_add(Weight::from_parts(48_745, 0).saturating_mul(m.into())) - // Standard Error: 1_358 - .saturating_add(Weight::from_parts(148_047, 0).saturating_mul(p.into())) + // Minimum execution time: 30_801_000 picoseconds. 
+ Weight::from_parts(32_942_969, 6676) + // Standard Error: 112 + .saturating_add(Weight::from_parts(614, 0).saturating_mul(b.into())) + // Standard Error: 1_177 + .saturating_add(Weight::from_parts(45_758, 0).saturating_mul(m.into())) + // Standard Error: 1_162 + .saturating_add(Weight::from_parts(136_600, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } - /// Storage: Alliance Members (r:1 w:0) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Voting (r:1 w:1) - /// Proof Skipped: AllianceMotion Voting (max_values: None, max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `m` is `[5, 100]`. fn vote(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1042 + m * (64 ±0)` + // Measured: `1113 + m * (64 ±0)` // Estimated: `6676 + m * (64 ±0)` - // Minimum execution time: 30_166_000 picoseconds. - Weight::from_parts(32_798_454, 6676) - // Standard Error: 1_432 - .saturating_add(Weight::from_parts(83_001, 0).saturating_mul(m.into())) + // Minimum execution time: 29_705_000 picoseconds. + Weight::from_parts(30_274_070, 6676) + // Standard Error: 884 + .saturating_add(Weight::from_parts(71_178, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: Alliance Members (r:1 w:0) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Voting (r:1 w:1) - /// Proof Skipped: AllianceMotion Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: AllianceMotion Members (r:1 w:0) - /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Proposals (r:1 w:1) - /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion ProposalOf (r:0 w:1) - /// Proof Skipped: AllianceMotion ProposalOf (max_values: None, max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. 
fn close_early_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `576 + m * (96 ±0) + p * (36 ±0)` + // Measured: `640 + m * (96 ±0) + p * (36 ±0)` // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` - // Minimum execution time: 45_173_000 picoseconds. - Weight::from_parts(42_192_020, 6676) - // Standard Error: 1_456 - .saturating_add(Weight::from_parts(66_751, 0).saturating_mul(m.into())) - // Standard Error: 1_420 - .saturating_add(Weight::from_parts(158_161, 0).saturating_mul(p.into())) + // Minimum execution time: 38_596_000 picoseconds. + Weight::from_parts(36_445_536, 6676) + // Standard Error: 1_217 + .saturating_add(Weight::from_parts(69_976, 0).saturating_mul(m.into())) + // Standard Error: 1_187 + .saturating_add(Weight::from_parts(149_706, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } - /// Storage: Alliance Members (r:1 w:0) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Voting (r:1 w:1) - /// Proof Skipped: AllianceMotion Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: AllianceMotion Members (r:1 w:0) - /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion ProposalOf (r:1 w:1) - /// Proof Skipped: AllianceMotion ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: AllianceMotion Proposals (r:1 w:1) - /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `b` is `[1, 1024]`. /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1087 + m * (96 ±0) + p * (39 ±0)` + // Measured: `1220 + m * (96 ±0) + p * (39 ±0)` // Estimated: `6676 + m * (97 ±0) + p * (40 ±0)` - // Minimum execution time: 58_290_000 picoseconds. 
- Weight::from_parts(54_924_919, 6676) - // Standard Error: 157 - .saturating_add(Weight::from_parts(464, 0).saturating_mul(b.into())) - // Standard Error: 1_665 - .saturating_add(Weight::from_parts(73_183, 0).saturating_mul(m.into())) - // Standard Error: 1_623 - .saturating_add(Weight::from_parts(168_318, 0).saturating_mul(p.into())) - .saturating_add(RocksDbWeight::get().reads(5_u64)) + // Minimum execution time: 57_602_000 picoseconds. + Weight::from_parts(55_147_214, 6676) + // Standard Error: 127 + .saturating_add(Weight::from_parts(1_650, 0).saturating_mul(b.into())) + // Standard Error: 1_346 + .saturating_add(Weight::from_parts(56_056, 0).saturating_mul(m.into())) + // Standard Error: 1_312 + .saturating_add(Weight::from_parts(168_247, 0).saturating_mul(p.into())) + .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) } - /// Storage: Alliance Members (r:1 w:0) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Voting (r:1 w:1) - /// Proof Skipped: AllianceMotion Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: AllianceMotion Members (r:1 w:0) - /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Prime (r:1 w:0) - /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Proposals (r:1 w:1) - /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion ProposalOf (r:0 w:1) - /// Proof Skipped: AllianceMotion ProposalOf (max_values: None, max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:1 w:0) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `m` is `[2, 100]`. /// The range of component `p` is `[1, 100]`. fn close_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `577 + m * (96 ±0) + p * (36 ±0)` + // Measured: `641 + m * (96 ±0) + p * (36 ±0)` // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` - // Minimum execution time: 46_794_000 picoseconds. - Weight::from_parts(43_092_958, 6676) - // Standard Error: 1_273 - .saturating_add(Weight::from_parts(71_054, 0).saturating_mul(m.into())) - // Standard Error: 1_257 - .saturating_add(Weight::from_parts(152_820, 0).saturating_mul(p.into())) + // Minimum execution time: 40_755_000 picoseconds. 
+ Weight::from_parts(36_953_935, 6676) + // Standard Error: 1_177 + .saturating_add(Weight::from_parts(73_240, 0).saturating_mul(m.into())) + // Standard Error: 1_162 + .saturating_add(Weight::from_parts(149_412, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } - /// Storage: Alliance Members (r:1 w:0) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Voting (r:1 w:1) - /// Proof Skipped: AllianceMotion Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: AllianceMotion Members (r:1 w:0) - /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Prime (r:1 w:0) - /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Proposals (r:1 w:1) - /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion ProposalOf (r:0 w:1) - /// Proof Skipped: AllianceMotion ProposalOf (max_values: None, max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:1 w:0) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `b` is `[1, 1024]`. /// The range of component `m` is `[5, 100]`. /// The range of component `p` is `[1, 100]`. fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `684 + m * (96 ±0) + p * (35 ±0)` + // Measured: `694 + m * (96 ±0) + p * (35 ±0)` // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` - // Minimum execution time: 47_338_000 picoseconds. - Weight::from_parts(41_257_479, 6676) - // Standard Error: 119 - .saturating_add(Weight::from_parts(1_019, 0).saturating_mul(b.into())) - // Standard Error: 1_277 - .saturating_add(Weight::from_parts(78_453, 0).saturating_mul(m.into())) - // Standard Error: 1_231 - .saturating_add(Weight::from_parts(150_991, 0).saturating_mul(p.into())) + // Minimum execution time: 41_113_000 picoseconds. 
+ Weight::from_parts(36_610_116, 6676) + // Standard Error: 92 + .saturating_add(Weight::from_parts(1_157, 0).saturating_mul(b.into())) + // Standard Error: 984 + .saturating_add(Weight::from_parts(63_050, 0).saturating_mul(m.into())) + // Standard Error: 949 + .saturating_add(Weight::from_parts(150_420, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } - /// Storage: Alliance Members (r:2 w:2) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Members (r:1 w:1) - /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:2 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Members` (r:1 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[1, 100]`. /// The range of component `z` is `[0, 100]`. fn init_members(m: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `217` + // Measured: `250` // Estimated: `12362` - // Minimum execution time: 35_012_000 picoseconds. - Weight::from_parts(24_288_079, 12362) - // Standard Error: 878 - .saturating_add(Weight::from_parts(153_615, 0).saturating_mul(m.into())) - // Standard Error: 867 - .saturating_add(Weight::from_parts(129_307, 0).saturating_mul(z.into())) + // Minimum execution time: 30_249_000 picoseconds. + Weight::from_parts(21_364_868, 12362) + // Standard Error: 887 + .saturating_add(Weight::from_parts(131_624, 0).saturating_mul(m.into())) + // Standard Error: 877 + .saturating_add(Weight::from_parts(105_379, 0).saturating_mul(z.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Alliance Members (r:2 w:2) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Proposals (r:1 w:0) - /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Alliance DepositOf (r:200 w:50) - /// Proof: Alliance DepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) - /// Storage: System Account (r:50 w:50) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: AllianceMotion Members (r:0 w:1) - /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Prime (r:0 w:1) - /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:2 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Alliance::DepositOf` (r:200 w:50) + /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:50 w:50) + /// Proof: `System::Account` (`max_values`: 
None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Members` (r:0 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `x` is `[1, 100]`. /// The range of component `y` is `[0, 100]`. /// The range of component `z` is `[0, 50]`. @@ -696,14 +703,14 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + x * (50 ±0) + y * (51 ±0) + z * (251 ±0)` // Estimated: `12362 + x * (2539 ±0) + y * (2539 ±0) + z * (2603 ±1)` - // Minimum execution time: 309_235_000 picoseconds. - Weight::from_parts(311_279_000, 12362) - // Standard Error: 26_510 - .saturating_add(Weight::from_parts(543_475, 0).saturating_mul(x.into())) - // Standard Error: 26_382 - .saturating_add(Weight::from_parts(603_169, 0).saturating_mul(y.into())) - // Standard Error: 52_716 - .saturating_add(Weight::from_parts(16_264_836, 0).saturating_mul(z.into())) + // Minimum execution time: 307_414_000 picoseconds. + Weight::from_parts(309_960_000, 12362) + // Standard Error: 29_278 + .saturating_add(Weight::from_parts(588_774, 0).saturating_mul(x.into())) + // Standard Error: 29_137 + .saturating_add(Weight::from_parts(563_245, 0).saturating_mul(y.into())) + // Standard Error: 58_221 + .saturating_add(Weight::from_parts(13_947_604, 0).saturating_mul(z.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(x.into()))) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(y.into()))) @@ -714,194 +721,194 @@ impl WeightInfo for () { .saturating_add(Weight::from_parts(0, 2539).saturating_mul(y.into())) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(z.into())) } - /// Storage: Alliance Rule (r:0 w:1) - /// Proof: Alliance Rule (max_values: Some(1), max_size: Some(87), added: 582, mode: MaxEncodedLen) + /// Storage: `Alliance::Rule` (r:0 w:1) + /// Proof: `Alliance::Rule` (`max_values`: Some(1), `max_size`: Some(87), added: 582, mode: `MaxEncodedLen`) fn set_rule() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_833_000 picoseconds. - Weight::from_parts(9_313_000, 0) + // Minimum execution time: 6_156_000 picoseconds. + Weight::from_parts(6_560_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Alliance Announcements (r:1 w:1) - /// Proof: Alliance Announcements (max_values: Some(1), max_size: Some(8702), added: 9197, mode: MaxEncodedLen) + /// Storage: `Alliance::Announcements` (r:1 w:1) + /// Proof: `Alliance::Announcements` (`max_values`: Some(1), `max_size`: Some(8702), added: 9197, mode: `MaxEncodedLen`) fn announce() -> Weight { // Proof Size summary in bytes: - // Measured: `246` + // Measured: `279` // Estimated: `10187` - // Minimum execution time: 12_231_000 picoseconds. - Weight::from_parts(12_761_000, 10187) + // Minimum execution time: 8_988_000 picoseconds. 
+ Weight::from_parts(9_476_000, 10187) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Alliance Announcements (r:1 w:1) - /// Proof: Alliance Announcements (max_values: Some(1), max_size: Some(8702), added: 9197, mode: MaxEncodedLen) + /// Storage: `Alliance::Announcements` (r:1 w:1) + /// Proof: `Alliance::Announcements` (`max_values`: Some(1), `max_size`: Some(8702), added: 9197, mode: `MaxEncodedLen`) fn remove_announcement() -> Weight { // Proof Size summary in bytes: - // Measured: `319` + // Measured: `352` // Estimated: `10187` - // Minimum execution time: 13_079_000 picoseconds. - Weight::from_parts(13_612_000, 10187) + // Minimum execution time: 10_126_000 picoseconds. + Weight::from_parts(10_755_000, 10187) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Alliance Members (r:3 w:1) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: Alliance UnscrupulousAccounts (r:1 w:0) - /// Proof: Alliance UnscrupulousAccounts (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Alliance DepositOf (r:0 w:1) - /// Proof: Alliance DepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) + /// Storage: `Alliance::Members` (r:3 w:1) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:0) + /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Alliance::DepositOf` (r:0 w:1) + /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) fn join_alliance() -> Weight { // Proof Size summary in bytes: - // Measured: `468` + // Measured: `501` // Estimated: `18048` - // Minimum execution time: 44_574_000 picoseconds. - Weight::from_parts(46_157_000, 18048) + // Minimum execution time: 38_878_000 picoseconds. + Weight::from_parts(40_493_000, 18048) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Alliance Members (r:3 w:1) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: Alliance UnscrupulousAccounts (r:1 w:0) - /// Proof: Alliance UnscrupulousAccounts (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) + /// Storage: `Alliance::Members` (r:3 w:1) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:0) + /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) fn nominate_ally() -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `400` // Estimated: `18048` - // Minimum execution time: 26_114_000 picoseconds. - Weight::from_parts(27_069_000, 18048) + // Minimum execution time: 23_265_000 picoseconds. 
+ Weight::from_parts(24_703_000, 18048) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Alliance Members (r:2 w:2) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Proposals (r:1 w:0) - /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Members (r:0 w:1) - /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Prime (r:0 w:1) - /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:2 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:0 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn elevate_ally() -> Weight { // Proof Size summary in bytes: - // Measured: `443` + // Measured: `476` // Estimated: `12362` - // Minimum execution time: 25_882_000 picoseconds. - Weight::from_parts(26_923_000, 12362) + // Minimum execution time: 23_049_000 picoseconds. + Weight::from_parts(23_875_000, 12362) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Alliance Members (r:4 w:2) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Proposals (r:1 w:0) - /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Members (r:0 w:1) - /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Prime (r:0 w:1) - /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Alliance RetiringMembers (r:0 w:1) - /// Proof: Alliance RetiringMembers (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `Alliance::Members` (r:4 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:0 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Alliance::RetiringMembers` (r:0 w:1) + /// Proof: `Alliance::RetiringMembers` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn give_retirement_notice() -> Weight { // Proof Size summary in bytes: - // Measured: `443` + // Measured: `476` // Estimated: `23734` - // Minimum execution time: 34_112_000 picoseconds. 
- Weight::from_parts(35_499_000, 23734) + // Minimum execution time: 29_124_000 picoseconds. + Weight::from_parts(30_369_000, 23734) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Alliance RetiringMembers (r:1 w:1) - /// Proof: Alliance RetiringMembers (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: Alliance Members (r:1 w:1) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: Alliance DepositOf (r:1 w:1) - /// Proof: Alliance DepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Alliance::RetiringMembers` (r:1 w:1) + /// Proof: `Alliance::RetiringMembers` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Alliance::Members` (r:1 w:1) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `Alliance::DepositOf` (r:1 w:1) + /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn retire() -> Weight { // Proof Size summary in bytes: - // Measured: `687` + // Measured: `720` // Estimated: `6676` - // Minimum execution time: 41_239_000 picoseconds. - Weight::from_parts(42_764_000, 6676) + // Minimum execution time: 36_376_000 picoseconds. + Weight::from_parts(38_221_000, 6676) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Alliance Members (r:3 w:1) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Proposals (r:1 w:0) - /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Alliance DepositOf (r:1 w:1) - /// Proof: Alliance DepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: AllianceMotion Members (r:0 w:1) - /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Prime (r:0 w:1) - /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:3 w:1) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Alliance::DepositOf` (r:1 w:1) + /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Members` (r:0 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// 
Storage: `AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn kick_member() -> Weight { // Proof Size summary in bytes: - // Measured: `707` + // Measured: `740` // Estimated: `18048` - // Minimum execution time: 68_071_000 picoseconds. - Weight::from_parts(71_808_000, 18048) + // Minimum execution time: 56_560_000 picoseconds. + Weight::from_parts(58_621_000, 18048) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Alliance UnscrupulousAccounts (r:1 w:1) - /// Proof: Alliance UnscrupulousAccounts (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: Alliance UnscrupulousWebsites (r:1 w:1) - /// Proof: Alliance UnscrupulousWebsites (max_values: Some(1), max_size: Some(25702), added: 26197, mode: MaxEncodedLen) + /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:1) + /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `Alliance::UnscrupulousWebsites` (r:1 w:1) + /// Proof: `Alliance::UnscrupulousWebsites` (`max_values`: Some(1), `max_size`: Some(25702), added: 26197, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 100]`. /// The range of component `l` is `[0, 255]`. fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `246` + // Measured: `279` // Estimated: `27187` - // Minimum execution time: 7_006_000 picoseconds. - Weight::from_parts(7_253_000, 27187) - // Standard Error: 3_403 - .saturating_add(Weight::from_parts(1_680_082, 0).saturating_mul(n.into())) - // Standard Error: 1_333 - .saturating_add(Weight::from_parts(72_943, 0).saturating_mul(l.into())) + // Minimum execution time: 5_191_000 picoseconds. + Weight::from_parts(5_410_000, 27187) + // Standard Error: 3_215 + .saturating_add(Weight::from_parts(1_018_569, 0).saturating_mul(n.into())) + // Standard Error: 1_259 + .saturating_add(Weight::from_parts(68_712, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Alliance UnscrupulousAccounts (r:1 w:1) - /// Proof: Alliance UnscrupulousAccounts (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: Alliance UnscrupulousWebsites (r:1 w:1) - /// Proof: Alliance UnscrupulousWebsites (max_values: Some(1), max_size: Some(25702), added: 26197, mode: MaxEncodedLen) + /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:1) + /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `Alliance::UnscrupulousWebsites` (r:1 w:1) + /// Proof: `Alliance::UnscrupulousWebsites` (`max_values`: Some(1), `max_size`: Some(25702), added: 26197, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 100]`. /// The range of component `l` is `[0, 255]`. fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + l * (100 ±0) + n * (289 ±0)` // Estimated: `27187` - // Minimum execution time: 7_292_000 picoseconds. 
- Weight::from_parts(7_629_000, 27187) - // Standard Error: 176_225 - .saturating_add(Weight::from_parts(16_646_429, 0).saturating_mul(n.into())) - // Standard Error: 69_017 - .saturating_add(Weight::from_parts(310_978, 0).saturating_mul(l.into())) + // Minimum execution time: 5_361_000 picoseconds. + Weight::from_parts(5_494_000, 27187) + // Standard Error: 181_133 + .saturating_add(Weight::from_parts(16_322_982, 0).saturating_mul(n.into())) + // Standard Error: 70_940 + .saturating_add(Weight::from_parts(343_581, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Alliance Members (r:3 w:2) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Proposals (r:1 w:0) - /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Members (r:0 w:1) - /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Prime (r:0 w:1) - /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:3 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:0 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn abdicate_fellow_status() -> Weight { // Proof Size summary in bytes: - // Measured: `443` + // Measured: `476` // Estimated: `18048` - // Minimum execution time: 31_798_000 picoseconds. - Weight::from_parts(33_463_000, 18048) + // Minimum execution time: 28_856_000 picoseconds. + Weight::from_parts(29_875_000, 18048) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } diff --git a/substrate/frame/asset-conversion/src/weights.rs b/substrate/frame/asset-conversion/src/weights.rs index a0e687f7a4168032c59daf8616249776a123c363..4eaa115c694d27712384a5ad2ccae742609aec76 100644 --- a/substrate/frame/asset-conversion/src/weights.rs +++ b/substrate/frame/asset-conversion/src/weights.rs @@ -17,24 +17,28 @@ //! Autogenerated weights for `pallet_asset_conversion` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-30, STEPS: `5`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `cob`, CPU: `` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/debug/substrate-node +// ./target/production/substrate-node // benchmark // pallet // --chain=dev -// --steps=5 -// --repeat=2 -// --pallet=pallet-asset-conversion +// --steps=50 +// --repeat=20 +// --pallet=pallet_asset_conversion +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 // --output=./substrate/frame/asset-conversion/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -59,11 +63,9 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// Storage: `AssetConversion::Pools` (r:1 w:1) /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:2) + /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:2 w:2) + /// Storage: `Assets::Asset` (r:2 w:0) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `AssetConversion::NextPoolAssetId` (r:1 w:1) /// Proof: `AssetConversion::NextPoolAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -73,12 +75,12 @@ impl WeightInfo for SubstrateWeight { /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn create_pool() -> Weight { // Proof Size summary in bytes: - // Measured: `1081` + // Measured: `910` // Estimated: `6360` - // Minimum execution time: 1_576_000_000 picoseconds. - Weight::from_parts(1_668_000_000, 6360) - .saturating_add(T::DbWeight::get().reads(10_u64)) - .saturating_add(T::DbWeight::get().writes(10_u64)) + // Minimum execution time: 82_941_000 picoseconds. + Weight::from_parts(85_526_000, 6360) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) } /// Storage: `AssetConversion::Pools` (r:1 w:0) /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) @@ -86,18 +88,20 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `Assets::Account` (r:4 w:4) /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `PoolAssets::Asset` (r:1 w:1) /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `PoolAssets::Account` (r:2 w:2) /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn add_liquidity() -> Weight { // Proof Size summary in bytes: - // Measured: `1761` + // Measured: `1507` // Estimated: `11426` - // Minimum execution time: 1_636_000_000 picoseconds. 
- Weight::from_parts(1_894_000_000, 11426) - .saturating_add(T::DbWeight::get().reads(10_u64)) - .saturating_add(T::DbWeight::get().writes(9_u64)) + // Minimum execution time: 138_424_000 picoseconds. + Weight::from_parts(142_083_000, 11426) + .saturating_add(T::DbWeight::get().reads(11_u64)) + .saturating_add(T::DbWeight::get().writes(10_u64)) } /// Storage: `AssetConversion::Pools` (r:1 w:0) /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) @@ -111,10 +115,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn remove_liquidity() -> Weight { // Proof Size summary in bytes: - // Measured: `1750` + // Measured: `1650` // Estimated: `11426` - // Minimum execution time: 1_507_000_000 picoseconds. - Weight::from_parts(1_524_000_000, 11426) + // Minimum execution time: 122_132_000 picoseconds. + Weight::from_parts(125_143_000, 11426) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(8_u64)) } @@ -125,12 +129,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[2, 4]`. fn swap_exact_tokens_for_tokens(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + n * (522 ±0)` + // Measured: `89 + n * (419 ±0)` // Estimated: `990 + n * (5218 ±0)` - // Minimum execution time: 937_000_000 picoseconds. - Weight::from_parts(941_000_000, 990) - // Standard Error: 40_863_477 - .saturating_add(Weight::from_parts(205_862_068, 0).saturating_mul(n.into())) + // Minimum execution time: 77_183_000 picoseconds. + Weight::from_parts(78_581_000, 990) + // Standard Error: 306_918 + .saturating_add(Weight::from_parts(10_581_054, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into())) @@ -142,12 +146,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[2, 4]`. fn swap_tokens_for_exact_tokens(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + n * (522 ±0)` + // Measured: `89 + n * (419 ±0)` // Estimated: `990 + n * (5218 ±0)` - // Minimum execution time: 935_000_000 picoseconds. - Weight::from_parts(947_000_000, 990) - // Standard Error: 46_904_620 - .saturating_add(Weight::from_parts(218_275_862, 0).saturating_mul(n.into())) + // Minimum execution time: 76_962_000 picoseconds. 
+ Weight::from_parts(78_315_000, 990) + // Standard Error: 311_204 + .saturating_add(Weight::from_parts(10_702_400, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into())) @@ -158,11 +162,9 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { /// Storage: `AssetConversion::Pools` (r:1 w:1) /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:2) + /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:2 w:2) + /// Storage: `Assets::Asset` (r:2 w:0) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `AssetConversion::NextPoolAssetId` (r:1 w:1) /// Proof: `AssetConversion::NextPoolAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -172,12 +174,12 @@ impl WeightInfo for () { /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn create_pool() -> Weight { // Proof Size summary in bytes: - // Measured: `1081` + // Measured: `910` // Estimated: `6360` - // Minimum execution time: 1_576_000_000 picoseconds. - Weight::from_parts(1_668_000_000, 6360) - .saturating_add(RocksDbWeight::get().reads(10_u64)) - .saturating_add(RocksDbWeight::get().writes(10_u64)) + // Minimum execution time: 82_941_000 picoseconds. + Weight::from_parts(85_526_000, 6360) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// Storage: `AssetConversion::Pools` (r:1 w:0) /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) @@ -185,18 +187,20 @@ impl WeightInfo for () { /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `Assets::Account` (r:4 w:4) /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `PoolAssets::Asset` (r:1 w:1) /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `PoolAssets::Account` (r:2 w:2) /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn add_liquidity() -> Weight { // Proof Size summary in bytes: - // Measured: `1761` + // Measured: `1507` // Estimated: `11426` - // Minimum execution time: 1_636_000_000 picoseconds. - Weight::from_parts(1_894_000_000, 11426) - .saturating_add(RocksDbWeight::get().reads(10_u64)) - .saturating_add(RocksDbWeight::get().writes(9_u64)) + // Minimum execution time: 138_424_000 picoseconds. 
+ Weight::from_parts(142_083_000, 11426) + .saturating_add(RocksDbWeight::get().reads(11_u64)) + .saturating_add(RocksDbWeight::get().writes(10_u64)) } /// Storage: `AssetConversion::Pools` (r:1 w:0) /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) @@ -210,10 +214,10 @@ impl WeightInfo for () { /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn remove_liquidity() -> Weight { // Proof Size summary in bytes: - // Measured: `1750` + // Measured: `1650` // Estimated: `11426` - // Minimum execution time: 1_507_000_000 picoseconds. - Weight::from_parts(1_524_000_000, 11426) + // Minimum execution time: 122_132_000 picoseconds. + Weight::from_parts(125_143_000, 11426) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(8_u64)) } @@ -224,12 +228,12 @@ impl WeightInfo for () { /// The range of component `n` is `[2, 4]`. fn swap_exact_tokens_for_tokens(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + n * (522 ±0)` + // Measured: `89 + n * (419 ±0)` // Estimated: `990 + n * (5218 ±0)` - // Minimum execution time: 937_000_000 picoseconds. - Weight::from_parts(941_000_000, 990) - // Standard Error: 40_863_477 - .saturating_add(Weight::from_parts(205_862_068, 0).saturating_mul(n.into())) + // Minimum execution time: 77_183_000 picoseconds. + Weight::from_parts(78_581_000, 990) + // Standard Error: 306_918 + .saturating_add(Weight::from_parts(10_581_054, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into())) @@ -241,12 +245,12 @@ impl WeightInfo for () { /// The range of component `n` is `[2, 4]`. fn swap_tokens_for_exact_tokens(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + n * (522 ±0)` + // Measured: `89 + n * (419 ±0)` // Estimated: `990 + n * (5218 ±0)` - // Minimum execution time: 935_000_000 picoseconds. - Weight::from_parts(947_000_000, 990) - // Standard Error: 46_904_620 - .saturating_add(Weight::from_parts(218_275_862, 0).saturating_mul(n.into())) + // Minimum execution time: 76_962_000 picoseconds. + Weight::from_parts(78_315_000, 990) + // Standard Error: 311_204 + .saturating_add(Weight::from_parts(10_702_400, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into())) diff --git a/substrate/frame/asset-rate/src/weights.rs b/substrate/frame/asset-rate/src/weights.rs index 582e20e56d7dc7f40a0cbb3838622b309e8223d4..b8723ee3bed5cbc124a8e7c1237e5c1af2e5a154 100644 --- a/substrate/frame/asset-rate/src/weights.rs +++ b/substrate/frame/asset-rate/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_asset_rate +//! Autogenerated weights for `pallet_asset_rate` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! 
DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/asset-rate/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/asset-rate/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,83 +49,83 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_asset_rate. +/// Weight functions needed for `pallet_asset_rate`. pub trait WeightInfo { fn create() -> Weight; fn update() -> Weight; fn remove() -> Weight; } -/// Weights for pallet_asset_rate using the Substrate node and recommended hardware. +/// Weights for `pallet_asset_rate` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: AssetRate ConversionRateToNative (r:1 w:1) - /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen) + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3501` - // Minimum execution time: 11_700_000 picoseconds. - Weight::from_parts(12_158_000, 3501) + // Minimum execution time: 9_447_000 picoseconds. + Weight::from_parts(10_078_000, 3501) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: AssetRate ConversionRateToNative (r:1 w:1) - /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen) + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) fn update() -> Weight { // Proof Size summary in bytes: // Measured: `137` // Estimated: `3501` - // Minimum execution time: 12_119_000 picoseconds. - Weight::from_parts(12_548_000, 3501) + // Minimum execution time: 9_844_000 picoseconds. 
+ Weight::from_parts(10_240_000, 3501) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: AssetRate ConversionRateToNative (r:1 w:1) - /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen) + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) fn remove() -> Weight { // Proof Size summary in bytes: // Measured: `137` // Estimated: `3501` - // Minimum execution time: 12_541_000 picoseconds. - Weight::from_parts(12_956_000, 3501) + // Minimum execution time: 10_411_000 picoseconds. + Weight::from_parts(10_686_000, 3501) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: AssetRate ConversionRateToNative (r:1 w:1) - /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen) + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3501` - // Minimum execution time: 11_700_000 picoseconds. - Weight::from_parts(12_158_000, 3501) + // Minimum execution time: 9_447_000 picoseconds. + Weight::from_parts(10_078_000, 3501) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: AssetRate ConversionRateToNative (r:1 w:1) - /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen) + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) fn update() -> Weight { // Proof Size summary in bytes: // Measured: `137` // Estimated: `3501` - // Minimum execution time: 12_119_000 picoseconds. - Weight::from_parts(12_548_000, 3501) + // Minimum execution time: 9_844_000 picoseconds. + Weight::from_parts(10_240_000, 3501) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: AssetRate ConversionRateToNative (r:1 w:1) - /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen) + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) fn remove() -> Weight { // Proof Size summary in bytes: // Measured: `137` // Estimated: `3501` - // Minimum execution time: 12_541_000 picoseconds. - Weight::from_parts(12_956_000, 3501) + // Minimum execution time: 10_411_000 picoseconds. 
+ Weight::from_parts(10_686_000, 3501) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs index cafe7bb1a3b53df4b48d2a1b9a45cdf14c18cea8..60316f8cdcf1f40eebcaad3ac182aefb513bce39 100644 --- a/substrate/frame/assets/src/lib.rs +++ b/substrate/frame/assets/src/lib.rs @@ -208,7 +208,7 @@ pub mod pallet { }; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/substrate/frame/assets/src/migration.rs b/substrate/frame/assets/src/migration.rs index ff0ffbff0d362fbd630e22dcb293f28e2b75469c..dd7c12293e80f410301bb47cca013ae4f013e72f 100644 --- a/substrate/frame/assets/src/migration.rs +++ b/substrate/frame/assets/src/migration.rs @@ -67,9 +67,9 @@ pub mod v1 { pub struct MigrateToV1(core::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { - let current_version = Pallet::::current_storage_version(); - let onchain_version = Pallet::::on_chain_storage_version(); - if onchain_version == 0 && current_version == 1 { + let in_code_version = Pallet::::in_code_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); + if on_chain_version == 0 && in_code_version == 1 { let mut translated = 0u64; Asset::::translate::< OldAssetDetails>, @@ -78,12 +78,12 @@ pub mod v1 { translated.saturating_inc(); Some(old_value.migrate_to_v1()) }); - current_version.put::>(); + in_code_version.put::>(); log::info!( target: LOG_TARGET, "Upgraded {} pools, storage to version {:?}", translated, - current_version + in_code_version ); T::DbWeight::get().reads_writes(translated + 1, translated + 1) } else { @@ -116,13 +116,13 @@ pub mod v1 { "the asset count before and after the migration should be the same" ); - let current_version = Pallet::::current_storage_version(); - let onchain_version = Pallet::::on_chain_storage_version(); + let in_code_version = Pallet::::in_code_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); - frame_support::ensure!(current_version == 1, "must_upgrade"); + frame_support::ensure!(in_code_version == 1, "must_upgrade"); ensure!( - current_version == onchain_version, - "after migration, the current_version and onchain_version should be the same" + in_code_version == on_chain_version, + "after migration, the in_code_version and on_chain_version should be the same" ); Asset::::iter().try_for_each(|(_id, asset)| -> Result<(), TryRuntimeError> { diff --git a/substrate/frame/assets/src/weights.rs b/substrate/frame/assets/src/weights.rs index f20f7e317cff7c32d280b2e6fa9eaa9aa0fdbd57..f5199105fe354c221779bbacab6a99b33dce3981 100644 --- a/substrate/frame/assets/src/weights.rs +++ b/substrate/frame/assets/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_assets +//! Autogenerated weights for `pallet_assets` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/assets/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/assets/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_assets. +/// Weight functions needed for `pallet_assets`. pub trait WeightInfo { fn create() -> Weight; fn force_create() -> Weight; @@ -86,882 +85,890 @@ pub trait WeightInfo { fn block() -> Weight; } -/// Weights for pallet_assets using the Substrate node and recommended hardware. +/// Weights for `pallet_assets` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: // Measured: `293` // Estimated: `3675` - // Minimum execution time: 31_340_000 picoseconds. - Weight::from_parts(31_977_000, 3675) + // Minimum execution time: 24_690_000 picoseconds. + Weight::from_parts(25_878_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn force_create() -> Weight { // Proof Size summary in bytes: // Measured: `153` // Estimated: `3675` - // Minimum execution time: 13_342_000 picoseconds. - Weight::from_parts(13_782_000, 3675) + // Minimum execution time: 10_997_000 picoseconds. 
+ Weight::from_parts(11_369_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn start_destroy() -> Weight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 14_437_000 picoseconds. - Weight::from_parts(14_833_000, 3675) + // Minimum execution time: 11_536_000 picoseconds. + Weight::from_parts(12_309_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1001 w:1000) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1000 w:1000) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1001 w:1000) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1000 w:1000) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `c` is `[0, 1000]`. fn destroy_accounts(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + c * (208 ±0)` // Estimated: `3675 + c * (2609 ±0)` - // Minimum execution time: 18_728_000 picoseconds. - Weight::from_parts(18_982_000, 3675) - // Standard Error: 11_708 - .saturating_add(Weight::from_parts(14_363_570, 0).saturating_mul(c.into())) + // Minimum execution time: 15_798_000 picoseconds. + Weight::from_parts(16_005_000, 3675) + // Standard Error: 7_818 + .saturating_add(Weight::from_parts(12_826_278, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(c.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(c.into()))) .saturating_add(Weight::from_parts(0, 2609).saturating_mul(c.into())) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Approvals (r:1001 w:1000) - /// Proof: Assets Approvals (max_values: None, max_size: Some(148), added: 2623, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1001 w:1000) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 1000]`. fn destroy_approvals(a: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `522 + a * (86 ±0)` // Estimated: `3675 + a * (2623 ±0)` - // Minimum execution time: 18_611_000 picoseconds. 
- Weight::from_parts(18_970_000, 3675) - // Standard Error: 13_224 - .saturating_add(Weight::from_parts(16_397_299, 0).saturating_mul(a.into())) + // Minimum execution time: 16_334_000 picoseconds. + Weight::from_parts(16_616_000, 3675) + // Standard Error: 6_402 + .saturating_add(Weight::from_parts(13_502_238, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) .saturating_add(Weight::from_parts(0, 2623).saturating_mul(a.into())) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:0) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:0) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) fn finish_destroy() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 14_504_000 picoseconds. - Weight::from_parts(14_906_000, 3675) + // Minimum execution time: 12_278_000 picoseconds. + Weight::from_parts(12_834_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn mint() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 26_653_000 picoseconds. - Weight::from_parts(27_260_000, 3675) + // Minimum execution time: 21_793_000 picoseconds. + Weight::from_parts(22_284_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn burn() -> Weight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 33_625_000 picoseconds. - Weight::from_parts(34_474_000, 3675) + // Minimum execution time: 28_712_000 picoseconds. 
+ Weight::from_parts(29_710_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:2 w:2) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 47_609_000 picoseconds. - Weight::from_parts(48_476_000, 6208) + // Minimum execution time: 41_331_000 picoseconds. + Weight::from_parts(42_362_000, 6208) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:2 w:2) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_keep_alive() -> Weight { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 41_625_000 picoseconds. - Weight::from_parts(43_030_000, 6208) + // Minimum execution time: 37_316_000 picoseconds. 
+ Weight::from_parts(38_200_000, 6208) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:2 w:2) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 47_661_000 picoseconds. - Weight::from_parts(48_469_000, 6208) + // Minimum execution time: 41_347_000 picoseconds. + Weight::from_parts(42_625_000, 6208) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn freeze() -> Weight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 17_727_000 picoseconds. - Weight::from_parts(18_384_000, 3675) + // Minimum execution time: 15_072_000 picoseconds. + Weight::from_parts(15_925_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn thaw() -> Weight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 17_657_000 picoseconds. - Weight::from_parts(18_282_000, 3675) + // Minimum execution time: 14_991_000 picoseconds. 
+ Weight::from_parts(15_987_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn freeze_asset() -> Weight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 13_743_000 picoseconds. - Weight::from_parts(14_193_000, 3675) + // Minimum execution time: 11_296_000 picoseconds. + Weight::from_parts(12_052_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn thaw_asset() -> Weight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 13_653_000 picoseconds. - Weight::from_parts(14_263_000, 3675) + // Minimum execution time: 11_446_000 picoseconds. + Weight::from_parts(11_791_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:0) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:0) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) fn transfer_ownership() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 15_328_000 picoseconds. - Weight::from_parts(16_042_000, 3675) + // Minimum execution time: 13_134_000 picoseconds. + Weight::from_parts(13_401_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn set_team() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 14_097_000 picoseconds. - Weight::from_parts(14_641_000, 3675) + // Minimum execution time: 11_395_000 picoseconds. 
+ Weight::from_parts(11_718_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:1) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 50]`. /// The range of component `s` is `[0, 50]`. - fn set_metadata(_n: u32, _s: u32, ) -> Weight { + fn set_metadata(n: u32, _s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 29_535_000 picoseconds. - Weight::from_parts(31_456_892, 3675) + // Minimum execution time: 25_147_000 picoseconds. + Weight::from_parts(26_614_272, 3675) + // Standard Error: 959 + .saturating_add(Weight::from_parts(2_300, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:1) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `515` // Estimated: `3675` - // Minimum execution time: 30_680_000 picoseconds. - Weight::from_parts(31_930_000, 3675) + // Minimum execution time: 26_094_000 picoseconds. + Weight::from_parts(27_199_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:1) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 50]`. /// The range of component `s` is `[0, 50]`. - fn force_set_metadata(_n: u32, s: u32, ) -> Weight { + fn force_set_metadata(n: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `190` // Estimated: `3675` - // Minimum execution time: 14_660_000 picoseconds. - Weight::from_parts(15_718_387, 3675) - // Standard Error: 622 - .saturating_add(Weight::from_parts(2_640, 0).saturating_mul(s.into())) + // Minimum execution time: 11_977_000 picoseconds. 
+ Weight::from_parts(12_719_933, 3675) + // Standard Error: 429 + .saturating_add(Weight::from_parts(3_239, 0).saturating_mul(n.into())) + // Standard Error: 429 + .saturating_add(Weight::from_parts(3_941, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:1) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) fn force_clear_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `515` // Estimated: `3675` - // Minimum execution time: 30_853_000 picoseconds. - Weight::from_parts(31_483_000, 3675) + // Minimum execution time: 25_859_000 picoseconds. + Weight::from_parts(26_654_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn force_asset_status() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 13_632_000 picoseconds. - Weight::from_parts(14_077_000, 3675) + // Minimum execution time: 10_965_000 picoseconds. + Weight::from_parts(11_595_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Approvals (r:1 w:1) - /// Proof: Assets Approvals (max_values: None, max_size: Some(148), added: 2623, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1 w:1) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) fn approve_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 33_780_000 picoseconds. - Weight::from_parts(34_533_000, 3675) + // Minimum execution time: 28_427_000 picoseconds. 
+ Weight::from_parts(29_150_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Approvals (r:1 w:1) - /// Proof: Assets Approvals (max_values: None, max_size: Some(148), added: 2623, mode: MaxEncodedLen) - /// Storage: Assets Account (r:2 w:2) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1 w:1) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_approved() -> Weight { // Proof Size summary in bytes: // Measured: `668` // Estimated: `6208` - // Minimum execution time: 67_712_000 picoseconds. - Weight::from_parts(69_946_000, 6208) + // Minimum execution time: 60_227_000 picoseconds. + Weight::from_parts(61_839_000, 6208) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Approvals (r:1 w:1) - /// Proof: Assets Approvals (max_values: None, max_size: Some(148), added: 2623, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1 w:1) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) fn cancel_approval() -> Weight { // Proof Size summary in bytes: // Measured: `555` // Estimated: `3675` - // Minimum execution time: 36_668_000 picoseconds. - Weight::from_parts(37_637_000, 3675) + // Minimum execution time: 30_738_000 picoseconds. + Weight::from_parts(31_988_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Approvals (r:1 w:1) - /// Proof: Assets Approvals (max_values: None, max_size: Some(148), added: 2623, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1 w:1) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) fn force_cancel_approval() -> Weight { // Proof Size summary in bytes: // Measured: `555` // Estimated: `3675` - // Minimum execution time: 36_685_000 picoseconds. - Weight::from_parts(37_950_000, 3675) + // Minimum execution time: 31_444_000 picoseconds. 
+ Weight::from_parts(32_126_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn set_min_balance() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 14_466_000 picoseconds. - Weight::from_parts(14_924_000, 3675) + // Minimum execution time: 11_890_000 picoseconds. + Weight::from_parts(12_462_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn touch() -> Weight { // Proof Size summary in bytes: // Measured: `453` // Estimated: `3675` - // Minimum execution time: 34_874_000 picoseconds. - Weight::from_parts(36_330_000, 3675) + // Minimum execution time: 30_675_000 picoseconds. + Weight::from_parts(31_749_000, 3675) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn touch_other() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 33_278_000 picoseconds. - Weight::from_parts(34_104_000, 3675) + // Minimum execution time: 28_290_000 picoseconds. 
+ Weight::from_parts(29_405_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn refund() -> Weight { // Proof Size summary in bytes: // Measured: `579` // Estimated: `3675` - // Minimum execution time: 32_898_000 picoseconds. - Weight::from_parts(33_489_000, 3675) + // Minimum execution time: 30_195_000 picoseconds. + Weight::from_parts(31_105_000, 3675) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn refund_other() -> Weight { // Proof Size summary in bytes: // Measured: `510` // Estimated: `3675` - // Minimum execution time: 31_243_000 picoseconds. - Weight::from_parts(31_909_000, 3675) + // Minimum execution time: 27_785_000 picoseconds. + Weight::from_parts(28_783_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn block() -> Weight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 17_692_000 picoseconds. - Weight::from_parts(18_253_000, 3675) + // Minimum execution time: 15_318_000 picoseconds. + Weight::from_parts(16_113_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: // Measured: `293` // Estimated: `3675` - // Minimum execution time: 31_340_000 picoseconds. - Weight::from_parts(31_977_000, 3675) + // Minimum execution time: 24_690_000 picoseconds. + Weight::from_parts(25_878_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn force_create() -> Weight { // Proof Size summary in bytes: // Measured: `153` // Estimated: `3675` - // Minimum execution time: 13_342_000 picoseconds. - Weight::from_parts(13_782_000, 3675) + // Minimum execution time: 10_997_000 picoseconds. + Weight::from_parts(11_369_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn start_destroy() -> Weight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 14_437_000 picoseconds. - Weight::from_parts(14_833_000, 3675) + // Minimum execution time: 11_536_000 picoseconds. + Weight::from_parts(12_309_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1001 w:1000) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1000 w:1000) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1001 w:1000) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1000 w:1000) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `c` is `[0, 1000]`. fn destroy_accounts(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + c * (208 ±0)` // Estimated: `3675 + c * (2609 ±0)` - // Minimum execution time: 18_728_000 picoseconds. 
- Weight::from_parts(18_982_000, 3675) - // Standard Error: 11_708 - .saturating_add(Weight::from_parts(14_363_570, 0).saturating_mul(c.into())) + // Minimum execution time: 15_798_000 picoseconds. + Weight::from_parts(16_005_000, 3675) + // Standard Error: 7_818 + .saturating_add(Weight::from_parts(12_826_278, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(c.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(c.into()))) .saturating_add(Weight::from_parts(0, 2609).saturating_mul(c.into())) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Approvals (r:1001 w:1000) - /// Proof: Assets Approvals (max_values: None, max_size: Some(148), added: 2623, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1001 w:1000) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 1000]`. fn destroy_approvals(a: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `522 + a * (86 ±0)` // Estimated: `3675 + a * (2623 ±0)` - // Minimum execution time: 18_611_000 picoseconds. - Weight::from_parts(18_970_000, 3675) - // Standard Error: 13_224 - .saturating_add(Weight::from_parts(16_397_299, 0).saturating_mul(a.into())) + // Minimum execution time: 16_334_000 picoseconds. + Weight::from_parts(16_616_000, 3675) + // Standard Error: 6_402 + .saturating_add(Weight::from_parts(13_502_238, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(a.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(a.into()))) .saturating_add(Weight::from_parts(0, 2623).saturating_mul(a.into())) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:0) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:0) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) fn finish_destroy() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 14_504_000 picoseconds. - Weight::from_parts(14_906_000, 3675) + // Minimum execution time: 12_278_000 picoseconds. 
+ Weight::from_parts(12_834_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn mint() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 26_653_000 picoseconds. - Weight::from_parts(27_260_000, 3675) + // Minimum execution time: 21_793_000 picoseconds. + Weight::from_parts(22_284_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn burn() -> Weight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 33_625_000 picoseconds. - Weight::from_parts(34_474_000, 3675) + // Minimum execution time: 28_712_000 picoseconds. + Weight::from_parts(29_710_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:2 w:2) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 47_609_000 picoseconds. - Weight::from_parts(48_476_000, 6208) + // Minimum execution time: 41_331_000 picoseconds. 
+ Weight::from_parts(42_362_000, 6208) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:2 w:2) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_keep_alive() -> Weight { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 41_625_000 picoseconds. - Weight::from_parts(43_030_000, 6208) + // Minimum execution time: 37_316_000 picoseconds. + Weight::from_parts(38_200_000, 6208) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:2 w:2) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 47_661_000 picoseconds. - Weight::from_parts(48_469_000, 6208) + // Minimum execution time: 41_347_000 picoseconds. + Weight::from_parts(42_625_000, 6208) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn freeze() -> Weight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 17_727_000 picoseconds. - Weight::from_parts(18_384_000, 3675) + // Minimum execution time: 15_072_000 picoseconds. 
+ Weight::from_parts(15_925_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn thaw() -> Weight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 17_657_000 picoseconds. - Weight::from_parts(18_282_000, 3675) + // Minimum execution time: 14_991_000 picoseconds. + Weight::from_parts(15_987_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn freeze_asset() -> Weight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 13_743_000 picoseconds. - Weight::from_parts(14_193_000, 3675) + // Minimum execution time: 11_296_000 picoseconds. + Weight::from_parts(12_052_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn thaw_asset() -> Weight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 13_653_000 picoseconds. - Weight::from_parts(14_263_000, 3675) + // Minimum execution time: 11_446_000 picoseconds. + Weight::from_parts(11_791_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:0) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:0) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) fn transfer_ownership() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 15_328_000 picoseconds. - Weight::from_parts(16_042_000, 3675) + // Minimum execution time: 13_134_000 picoseconds. 
+ Weight::from_parts(13_401_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn set_team() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 14_097_000 picoseconds. - Weight::from_parts(14_641_000, 3675) + // Minimum execution time: 11_395_000 picoseconds. + Weight::from_parts(11_718_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:1) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 50]`. /// The range of component `s` is `[0, 50]`. - fn set_metadata(_n: u32, _s: u32, ) -> Weight { + fn set_metadata(n: u32, _s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 29_535_000 picoseconds. - Weight::from_parts(31_456_892, 3675) + // Minimum execution time: 25_147_000 picoseconds. + Weight::from_parts(26_614_272, 3675) + // Standard Error: 959 + .saturating_add(Weight::from_parts(2_300, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:1) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `515` // Estimated: `3675` - // Minimum execution time: 30_680_000 picoseconds. - Weight::from_parts(31_930_000, 3675) + // Minimum execution time: 26_094_000 picoseconds. 
+ Weight::from_parts(27_199_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:1) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 50]`. /// The range of component `s` is `[0, 50]`. - fn force_set_metadata(_n: u32, s: u32, ) -> Weight { + fn force_set_metadata(n: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `190` // Estimated: `3675` - // Minimum execution time: 14_660_000 picoseconds. - Weight::from_parts(15_718_387, 3675) - // Standard Error: 622 - .saturating_add(Weight::from_parts(2_640, 0).saturating_mul(s.into())) + // Minimum execution time: 11_977_000 picoseconds. + Weight::from_parts(12_719_933, 3675) + // Standard Error: 429 + .saturating_add(Weight::from_parts(3_239, 0).saturating_mul(n.into())) + // Standard Error: 429 + .saturating_add(Weight::from_parts(3_941, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:1) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) fn force_clear_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `515` // Estimated: `3675` - // Minimum execution time: 30_853_000 picoseconds. - Weight::from_parts(31_483_000, 3675) + // Minimum execution time: 25_859_000 picoseconds. + Weight::from_parts(26_654_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn force_asset_status() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 13_632_000 picoseconds. - Weight::from_parts(14_077_000, 3675) + // Minimum execution time: 10_965_000 picoseconds. 
+ Weight::from_parts(11_595_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Approvals (r:1 w:1) - /// Proof: Assets Approvals (max_values: None, max_size: Some(148), added: 2623, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1 w:1) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) fn approve_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 33_780_000 picoseconds. - Weight::from_parts(34_533_000, 3675) + // Minimum execution time: 28_427_000 picoseconds. + Weight::from_parts(29_150_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Approvals (r:1 w:1) - /// Proof: Assets Approvals (max_values: None, max_size: Some(148), added: 2623, mode: MaxEncodedLen) - /// Storage: Assets Account (r:2 w:2) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1 w:1) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_approved() -> Weight { // Proof Size summary in bytes: // Measured: `668` // Estimated: `6208` - // Minimum execution time: 67_712_000 picoseconds. - Weight::from_parts(69_946_000, 6208) + // Minimum execution time: 60_227_000 picoseconds. + Weight::from_parts(61_839_000, 6208) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Approvals (r:1 w:1) - /// Proof: Assets Approvals (max_values: None, max_size: Some(148), added: 2623, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1 w:1) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) fn cancel_approval() -> Weight { // Proof Size summary in bytes: // Measured: `555` // Estimated: `3675` - // Minimum execution time: 36_668_000 picoseconds. - Weight::from_parts(37_637_000, 3675) + // Minimum execution time: 30_738_000 picoseconds. 
+ Weight::from_parts(31_988_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Approvals (r:1 w:1) - /// Proof: Assets Approvals (max_values: None, max_size: Some(148), added: 2623, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1 w:1) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) fn force_cancel_approval() -> Weight { // Proof Size summary in bytes: // Measured: `555` // Estimated: `3675` - // Minimum execution time: 36_685_000 picoseconds. - Weight::from_parts(37_950_000, 3675) + // Minimum execution time: 31_444_000 picoseconds. + Weight::from_parts(32_126_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn set_min_balance() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 14_466_000 picoseconds. - Weight::from_parts(14_924_000, 3675) + // Minimum execution time: 11_890_000 picoseconds. + Weight::from_parts(12_462_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn touch() -> Weight { // Proof Size summary in bytes: // Measured: `453` // Estimated: `3675` - // Minimum execution time: 34_874_000 picoseconds. - Weight::from_parts(36_330_000, 3675) + // Minimum execution time: 30_675_000 picoseconds. 
+ Weight::from_parts(31_749_000, 3675) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn touch_other() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 33_278_000 picoseconds. - Weight::from_parts(34_104_000, 3675) + // Minimum execution time: 28_290_000 picoseconds. + Weight::from_parts(29_405_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn refund() -> Weight { // Proof Size summary in bytes: // Measured: `579` // Estimated: `3675` - // Minimum execution time: 32_898_000 picoseconds. - Weight::from_parts(33_489_000, 3675) + // Minimum execution time: 30_195_000 picoseconds. + Weight::from_parts(31_105_000, 3675) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn refund_other() -> Weight { // Proof Size summary in bytes: // Measured: `510` // Estimated: `3675` - // Minimum execution time: 31_243_000 picoseconds. - Weight::from_parts(31_909_000, 3675) + // Minimum execution time: 27_785_000 picoseconds. 
+ Weight::from_parts(28_783_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn block() -> Weight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 17_692_000 picoseconds. - Weight::from_parts(18_253_000, 3675) + // Minimum execution time: 15_318_000 picoseconds. + Weight::from_parts(16_113_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs index b693f4fce9bde662c8badee8f0e675648597ea58..1ba0cdd0cc12e91ce11a545a40f7d0288bd3bbe0 100644 --- a/substrate/frame/babe/src/mock.rs +++ b/substrate/frame/babe/src/mock.rs @@ -37,8 +37,9 @@ use sp_core::{ use sp_io; use sp_runtime::{ curve::PiecewiseLinear, + generic::UncheckedExtrinsic, impl_opaque_keys, - testing::{Digest, DigestItem, Header, TestXt}, + testing::{Digest, DigestItem, Header}, traits::{Header as _, OpaqueKeys}, BuildStorage, Perbill, }; @@ -74,7 +75,7 @@ where RuntimeCall: From, { type OverarchingCall = RuntimeCall; - type Extrinsic = TestXt; + type Extrinsic = UncheckedExtrinsic; } impl_opaque_keys! { diff --git a/substrate/frame/bags-list/src/weights.rs b/substrate/frame/bags-list/src/weights.rs index d929c6bb95963c3eaee8ce0d16b127adcb64956e..804b702cc9a12c9f6a371b1ad019fd96038c7bb5 100644 --- a/substrate/frame/bags-list/src/weights.rs +++ b/substrate/frame/bags-list/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_bags_list +//! Autogenerated weights for `pallet_bags_list` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/bags-list/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/bags-list/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,123 +49,123 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_bags_list. +/// Weight functions needed for `pallet_bags_list`. pub trait WeightInfo { fn rebag_non_terminal() -> Weight; fn rebag_terminal() -> Weight; fn put_in_front_of() -> Weight; } -/// Weights for pallet_bags_list using the Substrate node and recommended hardware. +/// Weights for `pallet_bags_list` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:4 w:4) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:4 w:4) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn rebag_non_terminal() -> Weight { // Proof Size summary in bytes: - // Measured: `1724` + // Measured: `1719` // Estimated: `11506` - // Minimum execution time: 62_137_000 picoseconds. - Weight::from_parts(64_050_000, 11506) + // Minimum execution time: 55_856_000 picoseconds. 
+ Weight::from_parts(59_022_000, 11506) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:3 w:3) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:2 w:2) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn rebag_terminal() -> Weight { // Proof Size summary in bytes: - // Measured: `1618` + // Measured: `1613` // Estimated: `8877` - // Minimum execution time: 60_880_000 picoseconds. - Weight::from_parts(62_078_000, 8877) + // Minimum execution time: 55_418_000 picoseconds. + Weight::from_parts(57_352_000, 8877) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: VoterList ListNodes (r:4 w:4) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:2 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:2 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `VoterList::ListNodes` (r:4 w:4) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:2 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:2 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn put_in_front_of() -> Weight { // Proof Size summary in bytes: - // Measured: `1930` + // Measured: `1925` // Estimated: `11506` - // Minimum execution time: 68_911_000 picoseconds. 
- Weight::from_parts(70_592_000, 11506) + // Minimum execution time: 63_820_000 picoseconds. + Weight::from_parts(65_530_000, 11506) .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:4 w:4) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:4 w:4) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn rebag_non_terminal() -> Weight { // Proof Size summary in bytes: - // Measured: `1724` + // Measured: `1719` // Estimated: `11506` - // Minimum execution time: 62_137_000 picoseconds. - Weight::from_parts(64_050_000, 11506) + // Minimum execution time: 55_856_000 picoseconds. + Weight::from_parts(59_022_000, 11506) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:3 w:3) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:2 w:2) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn rebag_terminal() -> Weight { // Proof Size summary in bytes: - // Measured: `1618` + // Measured: `1613` // Estimated: `8877` - // Minimum execution time: 60_880_000 picoseconds. - Weight::from_parts(62_078_000, 8877) + // Minimum execution time: 55_418_000 picoseconds. 
+ Weight::from_parts(57_352_000, 8877) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: VoterList ListNodes (r:4 w:4) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:2 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:2 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `VoterList::ListNodes` (r:4 w:4) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:2 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:2 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn put_in_front_of() -> Weight { // Proof Size summary in bytes: - // Measured: `1930` + // Measured: `1925` // Estimated: `11506` - // Minimum execution time: 68_911_000 picoseconds. - Weight::from_parts(70_592_000, 11506) + // Minimum execution time: 63_820_000 picoseconds. + Weight::from_parts(65_530_000, 11506) .saturating_add(RocksDbWeight::get().reads(10_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } diff --git a/substrate/frame/balances/Cargo.toml b/substrate/frame/balances/Cargo.toml index 64ae90c67575fc1dcd139b7ed6dcbaa1c3ffda59..db114290ed8650bd59a105ff350cf7291e6b9830 100644 --- a/substrate/frame/balances/Cargo.toml +++ b/substrate/frame/balances/Cargo.toml @@ -53,6 +53,7 @@ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] try-runtime = [ diff --git a/substrate/frame/balances/src/lib.rs b/substrate/frame/balances/src/lib.rs index 7dd087eabd6343df653905856c078d3f6f2a3908..e87aa6f9311e30311066e21e73e8e006787fed90 100644 --- a/substrate/frame/balances/src/lib.rs +++ b/substrate/frame/balances/src/lib.rs @@ -320,7 +320,7 @@ pub mod pallet { type MaxFreezes: Get; } - /// The current storage version. + /// The in-code storage version. 
const STORAGE_VERSION: frame_support::traits::StorageVersion = frame_support::traits::StorageVersion::new(1); diff --git a/substrate/frame/balances/src/migration.rs b/substrate/frame/balances/src/migration.rs index ba6819ec6e814da69807c8717a088df2451b6cb5..38d9c07ff7e002f6a7a57189b7ebce6830b52c75 100644 --- a/substrate/frame/balances/src/migration.rs +++ b/substrate/frame/balances/src/migration.rs @@ -22,9 +22,9 @@ use frame_support::{ }; fn migrate_v0_to_v1, I: 'static>(accounts: &[T::AccountId]) -> Weight { - let onchain_version = Pallet::::on_chain_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); - if onchain_version == 0 { + if on_chain_version == 0 { let total = accounts .iter() .map(|a| Pallet::::total_balance(a)) @@ -76,9 +76,9 @@ impl, A: Get>, I: 'static> OnRuntimeUpgrade pub struct ResetInactive(PhantomData<(T, I)>); impl, I: 'static> OnRuntimeUpgrade for ResetInactive { fn on_runtime_upgrade() -> Weight { - let onchain_version = Pallet::::on_chain_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); - if onchain_version == 1 { + if on_chain_version == 1 { // Remove the old `StorageVersion` type. frame_support::storage::unhashed::kill(&frame_support::storage::storage_prefix( Pallet::::name().as_bytes(), diff --git a/substrate/frame/balances/src/tests/currency_tests.rs b/substrate/frame/balances/src/tests/currency_tests.rs index 46a4c4caefc3d72ad45adf58243f3430344d4813..10bb79901f8c2a63f41289b061416ebcf51b8997 100644 --- a/substrate/frame/balances/src/tests/currency_tests.rs +++ b/substrate/frame/balances/src/tests/currency_tests.rs @@ -30,6 +30,7 @@ use frame_support::{ StorageNoopGuard, }; use frame_system::Event as SysEvent; +use sp_runtime::traits::DispatchTransaction; const ID_1: LockIdentifier = *b"1 "; const ID_2: LockIdentifier = *b"2 "; @@ -240,17 +241,17 @@ fn lock_should_work_reserve() { TokenError::Frozen ); assert_noop!(Balances::reserve(&1, 1), Error::::LiquidityRestrictions,); - assert!( as SignedExtension>::pre_dispatch( + assert!(ChargeTransactionPayment::::validate_and_prepare( ChargeTransactionPayment::from(1), - &1, + Some(1).into(), CALL, &info_from_weight(Weight::from_parts(1, 0)), 1, ) .is_err()); - assert!( as SignedExtension>::pre_dispatch( + assert!(ChargeTransactionPayment::::validate_and_prepare( ChargeTransactionPayment::from(0), - &1, + Some(1).into(), CALL, &info_from_weight(Weight::from_parts(1, 0)), 1, @@ -271,17 +272,17 @@ fn lock_should_work_tx_fee() { TokenError::Frozen ); assert_noop!(Balances::reserve(&1, 1), Error::::LiquidityRestrictions,); - assert!( as SignedExtension>::pre_dispatch( + assert!(ChargeTransactionPayment::::validate_and_prepare( ChargeTransactionPayment::from(1), - &1, + Some(1).into(), CALL, &info_from_weight(Weight::from_parts(1, 0)), 1, ) .is_err()); - assert!( as SignedExtension>::pre_dispatch( + assert!(ChargeTransactionPayment::::validate_and_prepare( ChargeTransactionPayment::from(0), - &1, + Some(1).into(), CALL, &info_from_weight(Weight::from_parts(1, 0)), 1, diff --git a/substrate/frame/balances/src/tests/mod.rs b/substrate/frame/balances/src/tests/mod.rs index 599909fa94351b748ce7ea0514cc574347ea6a7a..ee076a10fd1085b9c9e6830606e40993db8b3364 100644 --- a/substrate/frame/balances/src/tests/mod.rs +++ b/substrate/frame/balances/src/tests/mod.rs @@ -37,7 +37,7 @@ use scale_info::TypeInfo; use sp_core::hexdisplay::HexDisplay; use sp_io; use sp_runtime::{ - traits::{BadOrigin, SignedExtension, Zero}, + traits::{BadOrigin, Zero}, ArithmeticError, BuildStorage, 
DispatchError, DispatchResult, FixedPointNumber, RuntimeDebug, TokenError, }; @@ -96,13 +96,13 @@ impl frame_system::Config for Test { type AccountData = super::AccountData; } +#[derive_impl(pallet_transaction_payment::config_preludes::TestDefaultConfig as pallet_transaction_payment::DefaultConfig)] impl pallet_transaction_payment::Config for Test { type RuntimeEvent = RuntimeEvent; type OnChargeTransaction = CurrencyAdapter, ()>; type OperationalFeeMultiplier = ConstU8<5>; type WeightToFee = IdentityFee; type LengthToFee = IdentityFee; - type FeeMultiplierUpdate = (); } pub(crate) type Balance = u64; diff --git a/substrate/frame/balances/src/weights.rs b/substrate/frame/balances/src/weights.rs index f875ea189ba1bce61b5fd6aec0c3aab54fb72bf2..6bcfe050af714326aded0eda4fe05c1d5c149e31 100644 --- a/substrate/frame/balances/src/weights.rs +++ b/substrate/frame/balances/src/weights.rs @@ -17,26 +17,28 @@ //! Autogenerated weights for `pallet_balances` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-j8vvqcjr-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_balances +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_balances -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/balances/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -69,8 +71,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 46_329_000 picoseconds. - Weight::from_parts(47_297_000, 3593) + // Minimum execution time: 46_407_000 picoseconds. + Weight::from_parts(47_561_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -80,8 +82,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 36_187_000 picoseconds. - Weight::from_parts(36_900_000, 3593) + // Minimum execution time: 36_574_000 picoseconds. + Weight::from_parts(37_682_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -91,8 +93,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 13_498_000 picoseconds. - Weight::from_parts(14_143_000, 3593) + // Minimum execution time: 13_990_000 picoseconds. 
+ Weight::from_parts(14_568_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -102,8 +104,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 18_756_000 picoseconds. - Weight::from_parts(19_553_000, 3593) + // Minimum execution time: 19_594_000 picoseconds. + Weight::from_parts(20_148_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -113,8 +115,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `103` // Estimated: `6196` - // Minimum execution time: 47_826_000 picoseconds. - Weight::from_parts(48_834_000, 6196) + // Minimum execution time: 47_945_000 picoseconds. + Weight::from_parts(49_363_000, 6196) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -124,8 +126,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 44_621_000 picoseconds. - Weight::from_parts(45_151_000, 3593) + // Minimum execution time: 45_205_000 picoseconds. + Weight::from_parts(45_952_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -135,8 +137,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 16_194_000 picoseconds. - Weight::from_parts(16_945_000, 3593) + // Minimum execution time: 16_593_000 picoseconds. + Weight::from_parts(17_123_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -147,10 +149,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + u * (135 ±0)` // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 15_782_000 picoseconds. - Weight::from_parts(16_118_000, 990) - // Standard Error: 10_499 - .saturating_add(Weight::from_parts(13_327_660, 0).saturating_mul(u.into())) + // Minimum execution time: 16_182_000 picoseconds. + Weight::from_parts(16_412_000, 990) + // Standard Error: 10_148 + .saturating_add(Weight::from_parts(13_382_459, 0).saturating_mul(u.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) @@ -159,8 +161,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_157_000 picoseconds. - Weight::from_parts(6_507_000, 0) + // Minimum execution time: 6_478_000 picoseconds. + Weight::from_parts(6_830_000, 0) } } @@ -172,8 +174,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 46_329_000 picoseconds. - Weight::from_parts(47_297_000, 3593) + // Minimum execution time: 46_407_000 picoseconds. + Weight::from_parts(47_561_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -183,8 +185,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 36_187_000 picoseconds. - Weight::from_parts(36_900_000, 3593) + // Minimum execution time: 36_574_000 picoseconds. 
+ Weight::from_parts(37_682_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -194,8 +196,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 13_498_000 picoseconds. - Weight::from_parts(14_143_000, 3593) + // Minimum execution time: 13_990_000 picoseconds. + Weight::from_parts(14_568_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -205,8 +207,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 18_756_000 picoseconds. - Weight::from_parts(19_553_000, 3593) + // Minimum execution time: 19_594_000 picoseconds. + Weight::from_parts(20_148_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -216,8 +218,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `103` // Estimated: `6196` - // Minimum execution time: 47_826_000 picoseconds. - Weight::from_parts(48_834_000, 6196) + // Minimum execution time: 47_945_000 picoseconds. + Weight::from_parts(49_363_000, 6196) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -227,8 +229,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 44_621_000 picoseconds. - Weight::from_parts(45_151_000, 3593) + // Minimum execution time: 45_205_000 picoseconds. + Weight::from_parts(45_952_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -238,8 +240,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 16_194_000 picoseconds. - Weight::from_parts(16_945_000, 3593) + // Minimum execution time: 16_593_000 picoseconds. + Weight::from_parts(17_123_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -250,10 +252,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + u * (135 ±0)` // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 15_782_000 picoseconds. - Weight::from_parts(16_118_000, 990) - // Standard Error: 10_499 - .saturating_add(Weight::from_parts(13_327_660, 0).saturating_mul(u.into())) + // Minimum execution time: 16_182_000 picoseconds. + Weight::from_parts(16_412_000, 990) + // Standard Error: 10_148 + .saturating_add(Weight::from_parts(13_382_459, 0).saturating_mul(u.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(u.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(u.into()))) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) @@ -262,7 +264,7 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_157_000 picoseconds. - Weight::from_parts(6_507_000, 0) + // Minimum execution time: 6_478_000 picoseconds. 
+ Weight::from_parts(6_830_000, 0) } } diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs index 9cce479890a48f7753be76b809f13abc1baa045d..5d963d309a4966cae1cb77516dda51c98b7154bf 100644 --- a/substrate/frame/beefy/src/mock.rs +++ b/substrate/frame/beefy/src/mock.rs @@ -29,8 +29,8 @@ use pallet_session::historical as pallet_session_historical; use sp_core::{crypto::KeyTypeId, ConstU128}; use sp_io::TestExternalities; use sp_runtime::{ - app_crypto::ecdsa::Public, curve::PiecewiseLinear, impl_opaque_keys, testing::TestXt, - traits::OpaqueKeys, BuildStorage, Perbill, + app_crypto::ecdsa::Public, curve::PiecewiseLinear, generic::UncheckedExtrinsic, + impl_opaque_keys, traits::OpaqueKeys, BuildStorage, Perbill, }; use sp_staking::{EraIndex, SessionIndex}; use sp_state_machine::BasicExternalities; @@ -73,7 +73,7 @@ where RuntimeCall: From, { type OverarchingCall = RuntimeCall; - type Extrinsic = TestXt; + type Extrinsic = UncheckedExtrinsic; } parameter_types! { diff --git a/substrate/frame/benchmarking/src/weights.rs b/substrate/frame/benchmarking/src/weights.rs index 13d73e420cce256080d117e3bab56842d302b3a0..76f9a3fe52b564d552b698ac94e44e8c98660deb 100644 --- a/substrate/frame/benchmarking/src/weights.rs +++ b/substrate/frame/benchmarking/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for frame_benchmarking +//! Autogenerated weights for `frame_benchmarking` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/benchmarking/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/benchmarking/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for frame_benchmarking. +/// Weight functions needed for `frame_benchmarking`. pub trait WeightInfo { fn addition(i: u32, ) -> Weight; fn subtraction(i: u32, ) -> Weight; @@ -60,7 +59,7 @@ pub trait WeightInfo { fn sr25519_verification(i: u32, ) -> Weight; } -/// Weights for frame_benchmarking using the Substrate node and recommended hardware. +/// Weights for `frame_benchmarking` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// The range of component `i` is `[0, 1000000]`. @@ -68,101 +67,101 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 147_000 picoseconds. - Weight::from_parts(185_656, 0) + // Minimum execution time: 150_000 picoseconds. + Weight::from_parts(190_440, 0) } /// The range of component `i` is `[0, 1000000]`. fn subtraction(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 146_000 picoseconds. - Weight::from_parts(189_816, 0) + // Minimum execution time: 151_000 picoseconds. + Weight::from_parts(201_397, 0) } /// The range of component `i` is `[0, 1000000]`. fn multiplication(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 148_000 picoseconds. - Weight::from_parts(202_367, 0) + // Minimum execution time: 152_000 picoseconds. + Weight::from_parts(187_862, 0) } /// The range of component `i` is `[0, 1000000]`. fn division(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 143_000 picoseconds. - Weight::from_parts(189_693, 0) + // Minimum execution time: 146_000 picoseconds. + Weight::from_parts(215_191, 0) } fn hashing() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 24_167_071_000 picoseconds. - Weight::from_parts(24_391_749_000, 0) + // Minimum execution time: 25_432_823_000 picoseconds. + Weight::from_parts(25_568_846_000, 0) } /// The range of component `i` is `[0, 100]`. fn sr25519_verification(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 231_000 picoseconds. - Weight::from_parts(2_998_013, 0) - // Standard Error: 6_256 - .saturating_add(Weight::from_parts(55_456_705, 0).saturating_mul(i.into())) + // Minimum execution time: 193_000 picoseconds. + Weight::from_parts(3_846_690, 0) + // Standard Error: 6_694 + .saturating_add(Weight::from_parts(40_647_582, 0).saturating_mul(i.into())) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { /// The range of component `i` is `[0, 1000000]`. fn addition(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 147_000 picoseconds. - Weight::from_parts(185_656, 0) + // Minimum execution time: 150_000 picoseconds. + Weight::from_parts(190_440, 0) } /// The range of component `i` is `[0, 1000000]`. fn subtraction(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 146_000 picoseconds. - Weight::from_parts(189_816, 0) + // Minimum execution time: 151_000 picoseconds. + Weight::from_parts(201_397, 0) } /// The range of component `i` is `[0, 1000000]`. fn multiplication(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 148_000 picoseconds. - Weight::from_parts(202_367, 0) + // Minimum execution time: 152_000 picoseconds. + Weight::from_parts(187_862, 0) } /// The range of component `i` is `[0, 1000000]`. fn division(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 143_000 picoseconds. 
- Weight::from_parts(189_693, 0) + // Minimum execution time: 146_000 picoseconds. + Weight::from_parts(215_191, 0) } fn hashing() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 24_167_071_000 picoseconds. - Weight::from_parts(24_391_749_000, 0) + // Minimum execution time: 25_432_823_000 picoseconds. + Weight::from_parts(25_568_846_000, 0) } /// The range of component `i` is `[0, 100]`. fn sr25519_verification(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 231_000 picoseconds. - Weight::from_parts(2_998_013, 0) - // Standard Error: 6_256 - .saturating_add(Weight::from_parts(55_456_705, 0).saturating_mul(i.into())) + // Minimum execution time: 193_000 picoseconds. + Weight::from_parts(3_846_690, 0) + // Standard Error: 6_694 + .saturating_add(Weight::from_parts(40_647_582, 0).saturating_mul(i.into())) } } diff --git a/substrate/frame/bounties/src/weights.rs b/substrate/frame/bounties/src/weights.rs index a172d15b56cc9a2a4686e3d6af61d9560635d5be..1f0e38b6702e99c4b27229c740abf76acd03f309 100644 --- a/substrate/frame/bounties/src/weights.rs +++ b/substrate/frame/bounties/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_bounties +//! Autogenerated weights for `pallet_bounties` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/bounties/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/bounties/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_bounties. +/// Weight functions needed for `pallet_bounties`. pub trait WeightInfo { fn propose_bounty(d: u32, ) -> Weight; fn approve_bounty() -> Weight; @@ -65,169 +64,169 @@ pub trait WeightInfo { fn spend_funds(b: u32, ) -> Weight; } -/// Weights for pallet_bounties using the Substrate node and recommended hardware. +/// Weights for `pallet_bounties` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Bounties BountyCount (r:1 w:1) - /// Proof: Bounties BountyCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyDescriptions (r:0 w:1) - /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) - /// Storage: Bounties Bounties (r:0 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) + /// Storage: `Bounties::BountyCount` (r:1 w:1) + /// Proof: `Bounties::BountyCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:0 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) /// The range of component `d` is `[0, 300]`. fn propose_bounty(d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `276` + // Measured: `309` // Estimated: `3593` - // Minimum execution time: 29_384_000 picoseconds. - Weight::from_parts(30_820_018, 3593) - // Standard Error: 298 - .saturating_add(Weight::from_parts(2_920, 0).saturating_mul(d.into())) + // Minimum execution time: 24_286_000 picoseconds. + Weight::from_parts(25_657_314, 3593) + // Standard Error: 215 + .saturating_add(Weight::from_parts(1_116, 0).saturating_mul(d.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: Bounties BountyApprovals (r:1 w:1) - /// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn approve_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `368` + // Measured: `401` // Estimated: `3642` - // Minimum execution time: 10_873_000 picoseconds. - Weight::from_parts(11_421_000, 3642) + // Minimum execution time: 12_526_000 picoseconds. 
+ Weight::from_parts(13_373_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `388` + // Measured: `421` // Estimated: `3642` - // Minimum execution time: 9_181_000 picoseconds. - Weight::from_parts(9_726_000, 3642) + // Minimum execution time: 11_807_000 picoseconds. + Weight::from_parts(12_340_000, 3642) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `564` + // Measured: `597` // Estimated: `3642` - // Minimum execution time: 30_257_000 picoseconds. - Weight::from_parts(30_751_000, 3642) + // Minimum execution time: 27_183_000 picoseconds. + Weight::from_parts(28_250_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `560` + // Measured: `593` // Estimated: `3642` - // Minimum execution time: 27_850_000 picoseconds. - Weight::from_parts(28_821_000, 3642) + // Minimum execution time: 26_775_000 picoseconds. 
+ Weight::from_parts(27_667_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:0) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) fn award_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `572` + // Measured: `605` // Estimated: `3642` - // Minimum execution time: 19_164_000 picoseconds. - Weight::from_parts(20_136_000, 3642) + // Minimum execution time: 16_089_000 picoseconds. + Weight::from_parts(16_909_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: System Account (r:3 w:3) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) - /// Proof: ChildBounties ChildrenCuratorFees (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Bounties BountyDescriptions (r:0 w:1) - /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn claim_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `936` + // Measured: `969` // Estimated: `8799` - // Minimum execution time: 120_235_000 picoseconds. - Weight::from_parts(121_673_000, 8799) + // Minimum execution time: 104_973_000 picoseconds. 
+ Weight::from_parts(107_696_000, 8799) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:0) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyDescriptions (r:0 w:1) - /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn close_bounty_proposed() -> Weight { // Proof Size summary in bytes: - // Measured: `616` + // Measured: `649` // Estimated: `3642` - // Minimum execution time: 35_713_000 picoseconds. - Weight::from_parts(37_174_000, 3642) + // Minimum execution time: 30_702_000 picoseconds. + Weight::from_parts(32_615_000, 3642) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:0) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyDescriptions (r:0 w:1) - /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn close_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `852` + // Measured: `885` // Estimated: `6196` - // Minimum execution time: 81_037_000 picoseconds. - Weight::from_parts(83_294_000, 6196) + // Minimum execution time: 72_055_000 picoseconds. 
+ Weight::from_parts(73_900_000, 6196) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) fn extend_bounty_expiry() -> Weight { // Proof Size summary in bytes: - // Measured: `424` + // Measured: `457` // Estimated: `3642` - // Minimum execution time: 15_348_000 picoseconds. - Weight::from_parts(15_776_000, 3642) + // Minimum execution time: 12_057_000 picoseconds. + Weight::from_parts(12_744_000, 3642) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Bounties BountyApprovals (r:1 w:1) - /// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) - /// Storage: Bounties Bounties (r:100 w:100) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: System Account (r:200 w:200) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:100 w:100) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:200 w:200) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 100]`. fn spend_funds(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `4 + b * (297 ±0)` + // Measured: `37 + b * (297 ±0)` // Estimated: `1887 + b * (5206 ±0)` - // Minimum execution time: 5_082_000 picoseconds. - Weight::from_parts(5_126_000, 1887) - // Standard Error: 21_949 - .saturating_add(Weight::from_parts(42_635_308, 0).saturating_mul(b.into())) + // Minimum execution time: 3_489_000 picoseconds. + Weight::from_parts(8_384_129, 1887) + // Standard Error: 18_066 + .saturating_add(Weight::from_parts(31_612_331, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -236,168 +235,168 @@ impl WeightInfo for SubstrateWeight { } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: Bounties BountyCount (r:1 w:1) - /// Proof: Bounties BountyCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyDescriptions (r:0 w:1) - /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) - /// Storage: Bounties Bounties (r:0 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) + /// Storage: `Bounties::BountyCount` (r:1 w:1) + /// Proof: `Bounties::BountyCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:0 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) /// The range of component `d` is `[0, 300]`. fn propose_bounty(d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `276` + // Measured: `309` // Estimated: `3593` - // Minimum execution time: 29_384_000 picoseconds. - Weight::from_parts(30_820_018, 3593) - // Standard Error: 298 - .saturating_add(Weight::from_parts(2_920, 0).saturating_mul(d.into())) + // Minimum execution time: 24_286_000 picoseconds. + Weight::from_parts(25_657_314, 3593) + // Standard Error: 215 + .saturating_add(Weight::from_parts(1_116, 0).saturating_mul(d.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: Bounties BountyApprovals (r:1 w:1) - /// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn approve_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `368` + // Measured: `401` // Estimated: `3642` - // Minimum execution time: 10_873_000 picoseconds. - Weight::from_parts(11_421_000, 3642) + // Minimum execution time: 12_526_000 picoseconds. + Weight::from_parts(13_373_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `388` + // Measured: `421` // Estimated: `3642` - // Minimum execution time: 9_181_000 picoseconds. 
- Weight::from_parts(9_726_000, 3642) + // Minimum execution time: 11_807_000 picoseconds. + Weight::from_parts(12_340_000, 3642) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `564` + // Measured: `597` // Estimated: `3642` - // Minimum execution time: 30_257_000 picoseconds. - Weight::from_parts(30_751_000, 3642) + // Minimum execution time: 27_183_000 picoseconds. + Weight::from_parts(28_250_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `560` + // Measured: `593` // Estimated: `3642` - // Minimum execution time: 27_850_000 picoseconds. - Weight::from_parts(28_821_000, 3642) + // Minimum execution time: 26_775_000 picoseconds. + Weight::from_parts(27_667_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:0) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) fn award_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `572` + // Measured: `605` // Estimated: `3642` - // Minimum execution time: 19_164_000 picoseconds. - Weight::from_parts(20_136_000, 3642) + // Minimum execution time: 16_089_000 picoseconds. 
+ Weight::from_parts(16_909_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: System Account (r:3 w:3) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) - /// Proof: ChildBounties ChildrenCuratorFees (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Bounties BountyDescriptions (r:0 w:1) - /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn claim_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `936` + // Measured: `969` // Estimated: `8799` - // Minimum execution time: 120_235_000 picoseconds. - Weight::from_parts(121_673_000, 8799) + // Minimum execution time: 104_973_000 picoseconds. + Weight::from_parts(107_696_000, 8799) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:0) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyDescriptions (r:0 w:1) - /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn close_bounty_proposed() -> Weight { // Proof Size summary in bytes: - // Measured: `616` + // Measured: `649` // Estimated: `3642` - // Minimum execution time: 35_713_000 picoseconds. - Weight::from_parts(37_174_000, 3642) + // Minimum execution time: 30_702_000 picoseconds. 
+ Weight::from_parts(32_615_000, 3642) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:0) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyDescriptions (r:0 w:1) - /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn close_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `852` + // Measured: `885` // Estimated: `6196` - // Minimum execution time: 81_037_000 picoseconds. - Weight::from_parts(83_294_000, 6196) + // Minimum execution time: 72_055_000 picoseconds. + Weight::from_parts(73_900_000, 6196) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) fn extend_bounty_expiry() -> Weight { // Proof Size summary in bytes: - // Measured: `424` + // Measured: `457` // Estimated: `3642` - // Minimum execution time: 15_348_000 picoseconds. - Weight::from_parts(15_776_000, 3642) + // Minimum execution time: 12_057_000 picoseconds. 
+ Weight::from_parts(12_744_000, 3642) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Bounties BountyApprovals (r:1 w:1) - /// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) - /// Storage: Bounties Bounties (r:100 w:100) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: System Account (r:200 w:200) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:100 w:100) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:200 w:200) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 100]`. fn spend_funds(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `4 + b * (297 ±0)` + // Measured: `37 + b * (297 ±0)` // Estimated: `1887 + b * (5206 ±0)` - // Minimum execution time: 5_082_000 picoseconds. - Weight::from_parts(5_126_000, 1887) - // Standard Error: 21_949 - .saturating_add(Weight::from_parts(42_635_308, 0).saturating_mul(b.into())) + // Minimum execution time: 3_489_000 picoseconds. + Weight::from_parts(8_384_129, 1887) + // Standard Error: 18_066 + .saturating_add(Weight::from_parts(31_612_331, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) diff --git a/substrate/frame/broker/src/weights.rs b/substrate/frame/broker/src/weights.rs index a8f50eeee6e6ceaf284c1764a68411956753cfff..133ea63e35f7539c69d07cd4d442a4ce97aab80f 100644 --- a/substrate/frame/broker/src/weights.rs +++ b/substrate/frame/broker/src/weights.rs @@ -17,26 +17,28 @@ //! Autogenerated weights for `pallet_broker` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-pzhd7p6z-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_broker +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_broker -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/broker/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -87,8 +89,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_040_000 picoseconds. - Weight::from_parts(3_344_000, 0) + // Minimum execution time: 2_701_000 picoseconds. + Weight::from_parts(2_902_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Reservations` (r:1 w:1) @@ -97,8 +99,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `5016` // Estimated: `7496` - // Minimum execution time: 21_259_000 picoseconds. - Weight::from_parts(22_110_000, 7496) + // Minimum execution time: 18_056_000 picoseconds. + Weight::from_parts(19_093_000, 7496) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -108,8 +110,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6218` // Estimated: `7496` - // Minimum execution time: 20_330_000 picoseconds. - Weight::from_parts(20_826_000, 7496) + // Minimum execution time: 17_233_000 picoseconds. + Weight::from_parts(17_788_000, 7496) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -119,8 +121,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 13_411_000 picoseconds. - Weight::from_parts(13_960_000, 1526) + // Minimum execution time: 9_740_000 picoseconds. + Weight::from_parts(10_504_000, 1526) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -139,14 +141,12 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Broker::Workplan` (r:0 w:10) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn start_sales(n: u32, ) -> Weight { + fn start_sales(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6330` // Estimated: `8499` - // Minimum execution time: 57_770_000 picoseconds. - Weight::from_parts(61_047_512, 8499) - // Standard Error: 165 - .saturating_add(Weight::from_parts(3, 0).saturating_mul(n.into())) + // Minimum execution time: 49_728_000 picoseconds. + Weight::from_parts(52_765_861, 8499) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(16_u64)) } @@ -162,10 +162,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `568` - // Estimated: `2053` - // Minimum execution time: 51_196_000 picoseconds. 
- Weight::from_parts(52_382_000, 2053) + // Measured: `635` + // Estimated: `2120` + // Minimum execution time: 41_986_000 picoseconds. + Weight::from_parts(43_465_000, 2120) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -185,10 +185,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `686` + // Measured: `753` // Estimated: `4698` - // Minimum execution time: 71_636_000 picoseconds. - Weight::from_parts(73_679_000, 4698) + // Minimum execution time: 61_779_000 picoseconds. + Weight::from_parts(62_563_000, 4698) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -198,8 +198,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 19_182_000 picoseconds. - Weight::from_parts(19_775_000, 3550) + // Minimum execution time: 16_962_000 picoseconds. + Weight::from_parts(17_733_000, 3550) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -209,21 +209,21 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 20_688_000 picoseconds. - Weight::from_parts(21_557_000, 3550) + // Minimum execution time: 18_380_000 picoseconds. + Weight::from_parts(19_105_000, 3550) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: `Broker::Regions` (r:1 w:2) + /// Storage: `Broker::Regions` (r:1 w:3) /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn interlace() -> Weight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 21_190_000 picoseconds. - Weight::from_parts(22_215_000, 3550) + // Minimum execution time: 20_115_000 picoseconds. + Weight::from_parts(20_741_000, 3550) .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) @@ -237,8 +237,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `740` // Estimated: `4681` - // Minimum execution time: 34_591_000 picoseconds. - Weight::from_parts(36_227_000, 4681) + // Minimum execution time: 31_339_000 picoseconds. + Weight::from_parts(32_639_000, 4681) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -256,8 +256,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `775` // Estimated: `5996` - // Minimum execution time: 40_346_000 picoseconds. - Weight::from_parts(41_951_000, 5996) + // Minimum execution time: 37_542_000 picoseconds. + Weight::from_parts(38_521_000, 5996) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -272,10 +272,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `859` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 75_734_000 picoseconds. 
- Weight::from_parts(78_168_395, 6196) - // Standard Error: 63_180 - .saturating_add(Weight::from_parts(1_076_259, 0).saturating_mul(m.into())) + // Minimum execution time: 66_176_000 picoseconds. + Weight::from_parts(68_356_745, 6196) + // Standard Error: 68_008 + .saturating_add(Weight::from_parts(1_558_419, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes(5_u64)) @@ -287,8 +287,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 46_383_000 picoseconds. - Weight::from_parts(47_405_000, 3593) + // Minimum execution time: 41_130_000 picoseconds. + Weight::from_parts(41_914_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -300,8 +300,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `603` // Estimated: `3550` - // Minimum execution time: 30_994_000 picoseconds. - Weight::from_parts(31_979_000, 3550) + // Minimum execution time: 31_042_000 picoseconds. + Weight::from_parts(34_087_000, 3550) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -315,8 +315,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `601` // Estimated: `3533` - // Minimum execution time: 37_584_000 picoseconds. - Weight::from_parts(44_010_000, 3533) + // Minimum execution time: 39_116_000 picoseconds. + Weight::from_parts(39_990_000, 3533) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -330,10 +330,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_history() -> Weight { // Proof Size summary in bytes: - // Measured: `830` + // Measured: `995` // Estimated: `3593` - // Minimum execution time: 45_266_000 picoseconds. - Weight::from_parts(48_000_000, 3593) + // Minimum execution time: 47_547_000 picoseconds. + Weight::from_parts(50_274_000, 3593) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -343,34 +343,32 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) fn drop_renewal() -> Weight { // Proof Size summary in bytes: - // Measured: `525` + // Measured: `661` // Estimated: `4698` - // Minimum execution time: 25_365_000 picoseconds. - Weight::from_parts(26_920_000, 4698) + // Minimum execution time: 26_707_000 picoseconds. + Weight::from_parts(27_217_000, 4698) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// The range of component `n` is `[0, 1000]`. - fn request_core_count(n: u32, ) -> Weight { + fn request_core_count(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_519_000 picoseconds. - Weight::from_parts(7_098_698, 0) - // Standard Error: 20 - .saturating_add(Weight::from_parts(8, 0).saturating_mul(n.into())) + // Minimum execution time: 4_651_000 picoseconds. 
+ Weight::from_parts(5_231_385, 0) } - /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Storage: `Broker::CoreCountInbox` (r:1 w:1) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. fn process_core_count(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `98` - // Estimated: `3563` - // Minimum execution time: 7_608_000 picoseconds. - Weight::from_parts(8_157_815, 3563) - // Standard Error: 26 - .saturating_add(Weight::from_parts(48, 0).saturating_mul(n.into())) + // Measured: `404` + // Estimated: `1487` + // Minimum execution time: 6_806_000 picoseconds. + Weight::from_parts(7_264_002, 1487) + // Standard Error: 21 + .saturating_add(Weight::from_parts(31, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -386,10 +384,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `905` - // Estimated: `4370` - // Minimum execution time: 59_993_000 picoseconds. - Weight::from_parts(61_752_000, 4370) + // Measured: `972` + // Estimated: `4437` + // Minimum execution time: 48_297_000 picoseconds. + Weight::from_parts(49_613_000, 4437) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -408,10 +406,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6281` // Estimated: `8499` - // Minimum execution time: 41_863_000 picoseconds. - Weight::from_parts(44_033_031, 8499) - // Standard Error: 116 - .saturating_add(Weight::from_parts(764, 0).saturating_mul(n.into())) + // Minimum execution time: 36_715_000 picoseconds. + Weight::from_parts(38_580_380, 8499) + // Standard Error: 91 + .saturating_add(Weight::from_parts(1_163, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(15_u64)) } @@ -423,8 +421,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3493` - // Minimum execution time: 9_588_000 picoseconds. - Weight::from_parts(9_925_000, 3493) + // Minimum execution time: 7_564_000 picoseconds. + Weight::from_parts(7_932_000, 3493) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -436,8 +434,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1423` // Estimated: `4681` - // Minimum execution time: 19_308_000 picoseconds. - Weight::from_parts(20_482_000, 4681) + // Minimum execution time: 17_082_000 picoseconds. + Weight::from_parts(17_662_000, 4681) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -445,28 +443,35 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 147_000 picoseconds. - Weight::from_parts(184_000, 0) + // Minimum execution time: 175_000 picoseconds. 
+ Weight::from_parts(223_000, 0) } + /// Storage: `Broker::CoreCountInbox` (r:0 w:1) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) fn notify_core_count() -> Weight { - T::DbWeight::get().reads_writes(1, 1) + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_432_000 picoseconds. + Weight::from_parts(2_536_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:1) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Storage: `Broker::CoreCountInbox` (r:1 w:0) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) fn do_tick_base() -> Weight { // Proof Size summary in bytes: - // Measured: `699` - // Estimated: `4164` - // Minimum execution time: 19_824_000 picoseconds. - Weight::from_parts(20_983_000, 4164) + // Measured: `603` + // Estimated: `4068` + // Minimum execution time: 13_080_000 picoseconds. + Weight::from_parts(13_937_000, 4068) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } } @@ -478,8 +483,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_040_000 picoseconds. - Weight::from_parts(3_344_000, 0) + // Minimum execution time: 2_701_000 picoseconds. + Weight::from_parts(2_902_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Reservations` (r:1 w:1) @@ -488,8 +493,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `5016` // Estimated: `7496` - // Minimum execution time: 21_259_000 picoseconds. - Weight::from_parts(22_110_000, 7496) + // Minimum execution time: 18_056_000 picoseconds. + Weight::from_parts(19_093_000, 7496) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -499,8 +504,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6218` // Estimated: `7496` - // Minimum execution time: 20_330_000 picoseconds. - Weight::from_parts(20_826_000, 7496) + // Minimum execution time: 17_233_000 picoseconds. + Weight::from_parts(17_788_000, 7496) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -510,8 +515,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 13_411_000 picoseconds. - Weight::from_parts(13_960_000, 1526) + // Minimum execution time: 9_740_000 picoseconds. 
+ Weight::from_parts(10_504_000, 1526) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -530,14 +535,12 @@ impl WeightInfo for () { /// Storage: `Broker::Workplan` (r:0 w:10) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn start_sales(n: u32, ) -> Weight { + fn start_sales(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6330` // Estimated: `8499` - // Minimum execution time: 57_770_000 picoseconds. - Weight::from_parts(61_047_512, 8499) - // Standard Error: 165 - .saturating_add(Weight::from_parts(3, 0).saturating_mul(n.into())) + // Minimum execution time: 49_728_000 picoseconds. + Weight::from_parts(52_765_861, 8499) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(16_u64)) } @@ -553,10 +556,10 @@ impl WeightInfo for () { /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `568` - // Estimated: `2053` - // Minimum execution time: 51_196_000 picoseconds. - Weight::from_parts(52_382_000, 2053) + // Measured: `635` + // Estimated: `2120` + // Minimum execution time: 41_986_000 picoseconds. + Weight::from_parts(43_465_000, 2120) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -576,10 +579,10 @@ impl WeightInfo for () { /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `686` + // Measured: `753` // Estimated: `4698` - // Minimum execution time: 71_636_000 picoseconds. - Weight::from_parts(73_679_000, 4698) + // Minimum execution time: 61_779_000 picoseconds. + Weight::from_parts(62_563_000, 4698) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -589,8 +592,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 19_182_000 picoseconds. - Weight::from_parts(19_775_000, 3550) + // Minimum execution time: 16_962_000 picoseconds. + Weight::from_parts(17_733_000, 3550) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -600,21 +603,21 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 20_688_000 picoseconds. - Weight::from_parts(21_557_000, 3550) + // Minimum execution time: 18_380_000 picoseconds. + Weight::from_parts(19_105_000, 3550) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: `Broker::Regions` (r:1 w:2) + /// Storage: `Broker::Regions` (r:1 w:3) /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn interlace() -> Weight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 21_190_000 picoseconds. - Weight::from_parts(22_215_000, 3550) + // Minimum execution time: 20_115_000 picoseconds. 
+ Weight::from_parts(20_741_000, 3550) .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) @@ -628,8 +631,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `740` // Estimated: `4681` - // Minimum execution time: 34_591_000 picoseconds. - Weight::from_parts(36_227_000, 4681) + // Minimum execution time: 31_339_000 picoseconds. + Weight::from_parts(32_639_000, 4681) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -647,8 +650,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `775` // Estimated: `5996` - // Minimum execution time: 40_346_000 picoseconds. - Weight::from_parts(41_951_000, 5996) + // Minimum execution time: 37_542_000 picoseconds. + Weight::from_parts(38_521_000, 5996) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -663,10 +666,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `859` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 75_734_000 picoseconds. - Weight::from_parts(78_168_395, 6196) - // Standard Error: 63_180 - .saturating_add(Weight::from_parts(1_076_259, 0).saturating_mul(m.into())) + // Minimum execution time: 66_176_000 picoseconds. + Weight::from_parts(68_356_745, 6196) + // Standard Error: 68_008 + .saturating_add(Weight::from_parts(1_558_419, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(RocksDbWeight::get().writes(5_u64)) @@ -678,8 +681,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 46_383_000 picoseconds. - Weight::from_parts(47_405_000, 3593) + // Minimum execution time: 41_130_000 picoseconds. + Weight::from_parts(41_914_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -691,8 +694,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `603` // Estimated: `3550` - // Minimum execution time: 30_994_000 picoseconds. - Weight::from_parts(31_979_000, 3550) + // Minimum execution time: 31_042_000 picoseconds. + Weight::from_parts(34_087_000, 3550) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -706,8 +709,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `601` // Estimated: `3533` - // Minimum execution time: 37_584_000 picoseconds. - Weight::from_parts(44_010_000, 3533) + // Minimum execution time: 39_116_000 picoseconds. + Weight::from_parts(39_990_000, 3533) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -721,10 +724,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_history() -> Weight { // Proof Size summary in bytes: - // Measured: `830` + // Measured: `995` // Estimated: `3593` - // Minimum execution time: 45_266_000 picoseconds. - Weight::from_parts(48_000_000, 3593) + // Minimum execution time: 47_547_000 picoseconds. 
+ Weight::from_parts(50_274_000, 3593) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -734,34 +737,32 @@ impl WeightInfo for () { /// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) fn drop_renewal() -> Weight { // Proof Size summary in bytes: - // Measured: `525` + // Measured: `661` // Estimated: `4698` - // Minimum execution time: 25_365_000 picoseconds. - Weight::from_parts(26_920_000, 4698) + // Minimum execution time: 26_707_000 picoseconds. + Weight::from_parts(27_217_000, 4698) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// The range of component `n` is `[0, 1000]`. - fn request_core_count(n: u32, ) -> Weight { + fn request_core_count(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_519_000 picoseconds. - Weight::from_parts(7_098_698, 0) - // Standard Error: 20 - .saturating_add(Weight::from_parts(8, 0).saturating_mul(n.into())) + // Minimum execution time: 4_651_000 picoseconds. + Weight::from_parts(5_231_385, 0) } - /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Storage: `Broker::CoreCountInbox` (r:1 w:1) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. fn process_core_count(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `98` - // Estimated: `3563` - // Minimum execution time: 7_608_000 picoseconds. - Weight::from_parts(8_157_815, 3563) - // Standard Error: 26 - .saturating_add(Weight::from_parts(48, 0).saturating_mul(n.into())) + // Measured: `404` + // Estimated: `1487` + // Minimum execution time: 6_806_000 picoseconds. + Weight::from_parts(7_264_002, 1487) + // Standard Error: 21 + .saturating_add(Weight::from_parts(31, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -777,10 +778,10 @@ impl WeightInfo for () { /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `905` - // Estimated: `4370` - // Minimum execution time: 59_993_000 picoseconds. - Weight::from_parts(61_752_000, 4370) + // Measured: `972` + // Estimated: `4437` + // Minimum execution time: 48_297_000 picoseconds. + Weight::from_parts(49_613_000, 4437) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -799,10 +800,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6281` // Estimated: `8499` - // Minimum execution time: 41_863_000 picoseconds. - Weight::from_parts(44_033_031, 8499) - // Standard Error: 116 - .saturating_add(Weight::from_parts(764, 0).saturating_mul(n.into())) + // Minimum execution time: 36_715_000 picoseconds. + Weight::from_parts(38_580_380, 8499) + // Standard Error: 91 + .saturating_add(Weight::from_parts(1_163, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(15_u64)) } @@ -814,8 +815,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3493` - // Minimum execution time: 9_588_000 picoseconds. 
- Weight::from_parts(9_925_000, 3493) + // Minimum execution time: 7_564_000 picoseconds. + Weight::from_parts(7_932_000, 3493) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -827,8 +828,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1423` // Estimated: `4681` - // Minimum execution time: 19_308_000 picoseconds. - Weight::from_parts(20_482_000, 4681) + // Minimum execution time: 17_082_000 picoseconds. + Weight::from_parts(17_662_000, 4681) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -836,28 +837,34 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 147_000 picoseconds. - Weight::from_parts(184_000, 0) + // Minimum execution time: 175_000 picoseconds. + Weight::from_parts(223_000, 0) } + /// Storage: `Broker::CoreCountInbox` (r:0 w:1) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) fn notify_core_count() -> Weight { - RocksDbWeight::get().reads(1) - .saturating_add(RocksDbWeight::get().writes(1)) + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_432_000 picoseconds. + Weight::from_parts(2_536_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:1) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Storage: `Broker::CoreCountInbox` (r:1 w:0) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) fn do_tick_base() -> Weight { // Proof Size summary in bytes: - // Measured: `699` - // Estimated: `4164` - // Minimum execution time: 19_824_000 picoseconds. - Weight::from_parts(20_983_000, 4164) + // Measured: `603` + // Estimated: `4068` + // Minimum execution time: 13_080_000 picoseconds. + Weight::from_parts(13_937_000, 4068) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } } diff --git a/substrate/frame/child-bounties/src/weights.rs b/substrate/frame/child-bounties/src/weights.rs index e4c1f238e88b7f94e177ab6005fca0beb3e91d1f..6427517b45b040a649e59c90c2a2bd920e1e7621 100644 --- a/substrate/frame/child-bounties/src/weights.rs +++ b/substrate/frame/child-bounties/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_child_bounties +//! Autogenerated weights for `pallet_child_bounties` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! 
DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/child-bounties/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/child-bounties/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_child_bounties. +/// Weight functions needed for `pallet_child_bounties`. pub trait WeightInfo { fn add_child_bounty(d: u32, ) -> Weight; fn propose_curator() -> Weight; @@ -62,288 +61,288 @@ pub trait WeightInfo { fn close_child_bounty_active() -> Weight; } -/// Weights for pallet_child_bounties using the Substrate node and recommended hardware. +/// Weights for `pallet_child_bounties` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: ChildBounties ParentChildBounties (r:1 w:1) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBountyCount (r:1 w:1) - /// Proof: ChildBounties ChildBountyCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) - /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBounties (r:0 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyCount` (r:1 w:1) + /// Proof: `ChildBounties::ChildBountyCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) 
+ /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:0 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) /// The range of component `d` is `[0, 300]`. fn add_child_bounty(_d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `712` + // Measured: `745` // Estimated: `6196` - // Minimum execution time: 69_805_000 picoseconds. - Weight::from_parts(73_216_717, 6196) + // Minimum execution time: 60_674_000 picoseconds. + Weight::from_parts(63_477_428, 6196) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) - /// Proof: ChildBounties ChildrenCuratorFees (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `766` + // Measured: `799` // Estimated: `3642` - // Minimum execution time: 18_190_000 picoseconds. - Weight::from_parts(18_932_000, 3642) + // Minimum execution time: 17_754_000 picoseconds. + Weight::from_parts(18_655_000, 3642) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `912` + // Measured: `945` // Estimated: `3642` - // Minimum execution time: 35_035_000 picoseconds. - Weight::from_parts(35_975_000, 3642) + // Minimum execution time: 31_580_000 picoseconds. 
+ Weight::from_parts(32_499_000, 3642) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `912` + // Measured: `945` // Estimated: `3642` - // Minimum execution time: 37_636_000 picoseconds. - Weight::from_parts(38_610_000, 3642) + // Minimum execution time: 33_536_000 picoseconds. + Weight::from_parts(34_102_000, 3642) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) fn award_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `809` + // Measured: `842` // Estimated: `3642` - // Minimum execution time: 22_457_000 picoseconds. - Weight::from_parts(23_691_000, 3642) + // Minimum execution time: 18_295_000 picoseconds. 
+ Weight::from_parts(19_496_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) - /// Storage: System Account (r:3 w:3) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:1) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) - /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn claim_child_bounty() -> Weight { // Proof Size summary in bytes: // Measured: `682` // Estimated: `8799` - // Minimum execution time: 118_272_000 picoseconds. - Weight::from_parts(121_646_000, 8799) + // Minimum execution time: 102_367_000 picoseconds. 
+ Weight::from_parts(104_352_000, 8799) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) - /// Proof: ChildBounties ChildrenCuratorFees (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:1) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) - /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn close_child_bounty_added() -> Weight { // Proof Size summary in bytes: - // Measured: `1012` + // Measured: `1045` // Estimated: `6196` - // Minimum execution time: 75_717_000 picoseconds. - Weight::from_parts(77_837_000, 6196) + // Minimum execution time: 69_051_000 picoseconds. 
+ Weight::from_parts(71_297_000, 6196) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) - /// Storage: System Account (r:3 w:3) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) - /// Proof: ChildBounties ChildrenCuratorFees (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:1) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) - /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn close_child_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `1199` + // Measured: `1232` // Estimated: `8799` - // Minimum execution time: 94_215_000 picoseconds. - Weight::from_parts(97_017_000, 8799) + // Minimum execution time: 84_053_000 picoseconds. + Weight::from_parts(86_072_000, 8799) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: ChildBounties ParentChildBounties (r:1 w:1) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBountyCount (r:1 w:1) - /// Proof: ChildBounties ChildBountyCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) - /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBounties (r:0 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyCount` (r:1 w:1) + /// Proof: `ChildBounties::ChildBountyCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:0 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) /// The range of component `d` is `[0, 300]`. fn add_child_bounty(_d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `712` + // Measured: `745` // Estimated: `6196` - // Minimum execution time: 69_805_000 picoseconds. - Weight::from_parts(73_216_717, 6196) + // Minimum execution time: 60_674_000 picoseconds. 
+ Weight::from_parts(63_477_428, 6196) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) - /// Proof: ChildBounties ChildrenCuratorFees (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `766` + // Measured: `799` // Estimated: `3642` - // Minimum execution time: 18_190_000 picoseconds. - Weight::from_parts(18_932_000, 3642) + // Minimum execution time: 17_754_000 picoseconds. + Weight::from_parts(18_655_000, 3642) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `912` + // Measured: `945` // Estimated: `3642` - // Minimum execution time: 35_035_000 picoseconds. - Weight::from_parts(35_975_000, 3642) + // Minimum execution time: 31_580_000 picoseconds. 
+ Weight::from_parts(32_499_000, 3642) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `912` + // Measured: `945` // Estimated: `3642` - // Minimum execution time: 37_636_000 picoseconds. - Weight::from_parts(38_610_000, 3642) + // Minimum execution time: 33_536_000 picoseconds. + Weight::from_parts(34_102_000, 3642) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) fn award_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `809` + // Measured: `842` // Estimated: `3642` - // Minimum execution time: 22_457_000 picoseconds. - Weight::from_parts(23_691_000, 3642) + // Minimum execution time: 18_295_000 picoseconds. 
+ Weight::from_parts(19_496_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) - /// Storage: System Account (r:3 w:3) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:1) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) - /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn claim_child_bounty() -> Weight { // Proof Size summary in bytes: // Measured: `682` // Estimated: `8799` - // Minimum execution time: 118_272_000 picoseconds. - Weight::from_parts(121_646_000, 8799) + // Minimum execution time: 102_367_000 picoseconds. 
+ Weight::from_parts(104_352_000, 8799) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) - /// Proof: ChildBounties ChildrenCuratorFees (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:1) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) - /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn close_child_bounty_added() -> Weight { // Proof Size summary in bytes: - // Measured: `1012` + // Measured: `1045` // Estimated: `6196` - // Minimum execution time: 75_717_000 picoseconds. - Weight::from_parts(77_837_000, 6196) + // Minimum execution time: 69_051_000 picoseconds. 
+ Weight::from_parts(71_297_000, 6196) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) - /// Storage: System Account (r:3 w:3) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) - /// Proof: ChildBounties ChildrenCuratorFees (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:1) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) - /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn close_child_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `1199` + // Measured: `1232` // Estimated: `8799` - // Minimum execution time: 94_215_000 picoseconds. - Weight::from_parts(97_017_000, 8799) + // Minimum execution time: 84_053_000 picoseconds. + Weight::from_parts(86_072_000, 8799) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } diff --git a/substrate/frame/collective/src/benchmarking.rs b/substrate/frame/collective/src/benchmarking.rs index af10eae5b673d53b03298b3e0cba48c885dec69b..7b5df17b60a696a17b706dd01f05f2a01d3e787e 100644 --- a/substrate/frame/collective/src/benchmarking.rs +++ b/substrate/frame/collective/src/benchmarking.rs @@ -105,7 +105,7 @@ benchmarks_instance_pallet! { }: _(SystemOrigin::Root, new_members.clone(), new_members.last().cloned(), T::MaxMembers::get()) verify { new_members.sort(); - assert_eq!(Collective::::members(), new_members); + assert_eq!(Members::::get(), new_members); } execute { @@ -199,14 +199,14 @@ benchmarks_instance_pallet! 
{ )?; } - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_eq!(Proposals::::get().len(), (p - 1) as usize); let proposal: T::Proposal = SystemCall::::remark { remark: id_to_remark_data(p, b as usize) }.into(); }: propose(SystemOrigin::Signed(caller.clone()), threshold, Box::new(proposal.clone()), bytes_in_storage) verify { // New proposal is recorded - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Proposals::::get().len(), p as usize); let proposal_hash = T::Hashing::hash_of(&proposal); assert_last_event::(Event::Proposed { account: caller, proposal_index: p - 1, proposal_hash, threshold }.into()); } @@ -269,7 +269,7 @@ benchmarks_instance_pallet! { approve, )?; - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Proposals::::get().len(), p as usize); // Voter switches vote to nay, but does not kill the vote, just updates + inserts let approve = false; @@ -280,8 +280,8 @@ benchmarks_instance_pallet! { }: _(SystemOrigin::Signed(voter), last_hash, index, approve) verify { // All proposals exist and the last proposal has just been updated. - assert_eq!(Collective::::proposals().len(), p as usize); - let voting = Collective::::voting(&last_hash).ok_or("Proposal Missing")?; + assert_eq!(Proposals::::get().len(), p as usize); + let voting = Voting::::get(&last_hash).ok_or("Proposal Missing")?; assert_eq!(voting.ayes.len(), (m - 3) as usize); assert_eq!(voting.nays.len(), 1); } @@ -344,7 +344,7 @@ benchmarks_instance_pallet! { approve, )?; - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Proposals::::get().len(), p as usize); // Voter switches vote to nay, which kills the vote let approve = false; @@ -361,7 +361,7 @@ benchmarks_instance_pallet! { }: close(SystemOrigin::Signed(voter), last_hash, index, Weight::MAX, bytes_in_storage) verify { // The last proposal is removed. - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_eq!(Proposals::::get().len(), (p - 1) as usize); assert_last_event::(Event::Disapproved { proposal_hash: last_hash }.into()); } @@ -428,7 +428,7 @@ benchmarks_instance_pallet! { true, )?; - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Proposals::::get().len(), p as usize); // Caller switches vote to aye, which passes the vote let index = p - 1; @@ -442,7 +442,7 @@ benchmarks_instance_pallet! { }: close(SystemOrigin::Signed(caller), last_hash, index, Weight::MAX, bytes_in_storage) verify { // The last proposal is removed. - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_eq!(Proposals::::get().len(), (p - 1) as usize); assert_last_event::(Event::Executed { proposal_hash: last_hash, result: Ok(()) }.into()); } @@ -519,12 +519,12 @@ benchmarks_instance_pallet! { )?; System::::set_block_number(BlockNumberFor::::max_value()); - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Proposals::::get().len(), p as usize); // Prime nay will close it as disapproved }: close(SystemOrigin::Signed(caller), last_hash, index, Weight::MAX, bytes_in_storage) verify { - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_eq!(Proposals::::get().len(), (p - 1) as usize); assert_last_event::(Event::Disapproved { proposal_hash: last_hash }.into()); } @@ -591,12 +591,12 @@ benchmarks_instance_pallet! 
{ // caller is prime, prime already votes aye by creating the proposal System::::set_block_number(BlockNumberFor::::max_value()); - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Proposals::::get().len(), p as usize); // Prime aye will close it as approved }: close(SystemOrigin::Signed(caller), last_hash, p - 1, Weight::MAX, bytes_in_storage) verify { - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_eq!(Proposals::::get().len(), (p - 1) as usize); assert_last_event::(Event::Executed { proposal_hash: last_hash, result: Ok(()) }.into()); } @@ -640,11 +640,11 @@ benchmarks_instance_pallet! { } System::::set_block_number(BlockNumberFor::::max_value()); - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Proposals::::get().len(), p as usize); }: _(SystemOrigin::Root, last_hash) verify { - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_eq!(Proposals::::get().len(), (p - 1) as usize); assert_last_event::(Event::Disapproved { proposal_hash: last_hash }.into()); } diff --git a/substrate/frame/collective/src/lib.rs b/substrate/frame/collective/src/lib.rs index c084784e0a9bca9914cbbf879c746bfdda1774fb..882e99a6d0051306a103630a893d9fb44b803072 100644 --- a/substrate/frame/collective/src/lib.rs +++ b/substrate/frame/collective/src/lib.rs @@ -176,7 +176,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); #[pallet::pallet] @@ -261,36 +261,30 @@ pub mod pallet { /// The hashes of the active proposals. #[pallet::storage] - #[pallet::getter(fn proposals)] pub type Proposals, I: 'static = ()> = StorageValue<_, BoundedVec, ValueQuery>; /// Actual proposal for a given hash, if it's current. #[pallet::storage] - #[pallet::getter(fn proposal_of)] pub type ProposalOf, I: 'static = ()> = StorageMap<_, Identity, T::Hash, >::Proposal, OptionQuery>; /// Votes on a given proposal, if it is ongoing. #[pallet::storage] - #[pallet::getter(fn voting)] pub type Voting, I: 'static = ()> = StorageMap<_, Identity, T::Hash, Votes>, OptionQuery>; /// Proposals so far. #[pallet::storage] - #[pallet::getter(fn proposal_count)] pub type ProposalCount, I: 'static = ()> = StorageValue<_, u32, ValueQuery>; /// The current members of the collective. This is stored sorted (just by value). #[pallet::storage] - #[pallet::getter(fn members)] pub type Members, I: 'static = ()> = StorageValue<_, Vec, ValueQuery>; /// The prime member that helps determine the default vote behavior in case of absentations. 
#[pallet::storage] - #[pallet::getter(fn prime)] pub type Prime, I: 'static = ()> = StorageValue<_, T::AccountId, OptionQuery>; #[pallet::event] @@ -459,7 +453,7 @@ pub mod pallet { #[pallet::compact] length_bound: u32, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; - let members = Self::members(); + let members = Members::::get(); ensure!(members.contains(&who), Error::::NotMember); let proposal_len = proposal.encoded_size(); ensure!(proposal_len <= length_bound as usize, Error::::WrongProposalLength); @@ -519,7 +513,7 @@ pub mod pallet { #[pallet::compact] length_bound: u32, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; - let members = Self::members(); + let members = Members::::get(); ensure!(members.contains(&who), Error::::NotMember); if threshold < 2 { @@ -565,7 +559,7 @@ pub mod pallet { approve: bool, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; - let members = Self::members(); + let members = Members::::get(); ensure!(members.contains(&who), Error::::NotMember); // Detects first vote of the member in the motion @@ -669,7 +663,7 @@ impl, I: 'static> Pallet { pub fn is_member(who: &T::AccountId) -> bool { // Note: The dispatchables *do not* use this to check membership so make sure // to update those if this is changed. - Self::members().contains(who) + Members::::get().contains(who) } /// Execute immediately when adding a new proposal. @@ -688,7 +682,7 @@ impl, I: 'static> Pallet { let proposal_hash = T::Hashing::hash_of(&proposal); ensure!(!>::contains_key(proposal_hash), Error::::DuplicateProposal); - let seats = Self::members().len() as MemberCount; + let seats = Members::::get().len() as MemberCount; let result = proposal.dispatch(RawOrigin::Members(1, seats).into()); Self::deposit_event(Event::Executed { proposal_hash, @@ -721,7 +715,7 @@ impl, I: 'static> Pallet { Ok(proposals.len()) })?; - let index = Self::proposal_count(); + let index = ProposalCount::::get(); >::mutate(|i| *i += 1); >::insert(proposal_hash, proposal); let votes = { @@ -747,7 +741,7 @@ impl, I: 'static> Pallet { index: ProposalIndex, approve: bool, ) -> Result { - let mut voting = Self::voting(&proposal).ok_or(Error::::ProposalMissing)?; + let mut voting = Voting::::get(&proposal).ok_or(Error::::ProposalMissing)?; ensure!(voting.index == index, Error::::WrongIndex); let position_yes = voting.ayes.iter().position(|a| a == &who); @@ -798,12 +792,12 @@ impl, I: 'static> Pallet { proposal_weight_bound: Weight, length_bound: u32, ) -> DispatchResultWithPostInfo { - let voting = Self::voting(&proposal_hash).ok_or(Error::::ProposalMissing)?; + let voting = Voting::::get(&proposal_hash).ok_or(Error::::ProposalMissing)?; ensure!(voting.index == index, Error::::WrongIndex); let mut no_votes = voting.nays.len() as MemberCount; let mut yes_votes = voting.ayes.len() as MemberCount; - let seats = Self::members().len() as MemberCount; + let seats = Members::::get().len() as MemberCount; let approved = yes_votes >= voting.threshold; let disapproved = seats.saturating_sub(no_votes) < voting.threshold; // Allow (dis-)approving the proposal as soon as there are enough votes. @@ -837,7 +831,7 @@ impl, I: 'static> Pallet { // Only allow actual closing of the proposal after the voting period has ended. 
ensure!(frame_system::Pallet::::block_number() >= voting.end, Error::::TooEarly); - let prime_vote = Self::prime().map(|who| voting.ayes.iter().any(|a| a == &who)); + let prime_vote = Prime::::get().map(|who| voting.ayes.iter().any(|a| a == &who)); // default voting strategy. let default = T::DefaultVote::default_vote(prime_vote, yes_votes, no_votes, seats); @@ -978,29 +972,28 @@ impl, I: 'static> Pallet { /// * The prime account must be a member of the collective. #[cfg(any(feature = "try-runtime", test))] fn do_try_state() -> Result<(), TryRuntimeError> { - Self::proposals() - .into_iter() - .try_for_each(|proposal| -> Result<(), TryRuntimeError> { + Proposals::::get().into_iter().try_for_each( + |proposal| -> Result<(), TryRuntimeError> { ensure!( - Self::proposal_of(proposal).is_some(), + ProposalOf::::get(proposal).is_some(), "Proposal hash from `Proposals` is not found inside the `ProposalOf` mapping." ); Ok(()) - })?; + }, + )?; ensure!( - Self::proposals().into_iter().count() <= Self::proposal_count() as usize, + Proposals::::get().into_iter().count() <= ProposalCount::::get() as usize, "The actual number of proposals is greater than `ProposalCount`" ); ensure!( - Self::proposals().into_iter().count() == >::iter_keys().count(), + Proposals::::get().into_iter().count() == >::iter_keys().count(), "Proposal count inside `Proposals` is not equal to the proposal count in `ProposalOf`" ); - Self::proposals() - .into_iter() - .try_for_each(|proposal| -> Result<(), TryRuntimeError> { - if let Some(votes) = Self::voting(proposal) { + Proposals::::get().into_iter().try_for_each( + |proposal| -> Result<(), TryRuntimeError> { + if let Some(votes) = Voting::::get(proposal) { let ayes = votes.ayes.len(); let nays = votes.nays.len(); @@ -1010,13 +1003,13 @@ impl, I: 'static> Pallet { ); } Ok(()) - })?; + }, + )?; let mut proposal_indices = vec![]; - Self::proposals() - .into_iter() - .try_for_each(|proposal| -> Result<(), TryRuntimeError> { - if let Some(votes) = Self::voting(proposal) { + Proposals::::get().into_iter().try_for_each( + |proposal| -> Result<(), TryRuntimeError> { + if let Some(votes) = Voting::::get(proposal) { let proposal_index = votes.index; ensure!( !proposal_indices.contains(&proposal_index), @@ -1025,12 +1018,13 @@ impl, I: 'static> Pallet { proposal_indices.push(proposal_index); } Ok(()) - })?; + }, + )?; >::iter_keys().try_for_each( |proposal_hash| -> Result<(), TryRuntimeError> { ensure!( - Self::proposals().contains(&proposal_hash), + Proposals::::get().contains(&proposal_hash), "`Proposals` doesn't contain the proposal hash from the `Voting` storage map." ); Ok(()) @@ -1038,17 +1032,17 @@ impl, I: 'static> Pallet { )?; ensure!( - Self::members().len() <= T::MaxMembers::get() as usize, + Members::::get().len() <= T::MaxMembers::get() as usize, "The member count is greater than `MaxMembers`." ); ensure!( - Self::members().windows(2).all(|members| members[0] <= members[1]), + Members::::get().windows(2).all(|members| members[0] <= members[1]), "The members are not sorted by value." ); - if let Some(prime) = Self::prime() { - ensure!(Self::members().contains(&prime), "Prime account is not a member."); + if let Some(prime) = Prime::::get() { + ensure!(Members::::get().contains(&prime), "Prime account is not a member."); } Ok(()) @@ -1082,7 +1076,7 @@ impl, I: 'static> ChangeMembers for Pallet { // remove accounts from all current voting in motions. 
let mut outgoing = outgoing.to_vec(); outgoing.sort(); - for h in Self::proposals().into_iter() { + for h in Proposals::::get().into_iter() { >::mutate(h, |v| { if let Some(mut votes) = v.take() { votes.ayes = votes diff --git a/substrate/frame/collective/src/tests.rs b/substrate/frame/collective/src/tests.rs index aae17b7ffc27bb6d9e200dee18a44c7ec0aa7dcc..92418794c5fb70fb44d0b60572ef18e3a617cfd9 100644 --- a/substrate/frame/collective/src/tests.rs +++ b/substrate/frame/collective/src/tests.rs @@ -29,7 +29,7 @@ use sp_core::H256; use sp_runtime::{testing::Header, traits::BlakeTwo256, BuildStorage}; pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Test @@ -193,8 +193,8 @@ fn default_max_proposal_weight() -> Weight { #[test] fn motions_basic_environment_works() { ExtBuilder::default().build_and_execute(|| { - assert_eq!(Collective::members(), vec![1, 2, 3]); - assert_eq!(*Collective::proposals(), Vec::::new()); + assert_eq!(Members::::get(), vec![1, 2, 3]); + assert_eq!(*Proposals::::get(), Vec::::new()); }); } @@ -205,7 +205,7 @@ fn initialize_members_sorts_members() { ExtBuilder::default() .set_collective_members(unsorted_members) .build_and_execute(|| { - assert_eq!(Collective::members(), expected_members); + assert_eq!(Members::::get(), expected_members); }); } @@ -219,8 +219,8 @@ fn set_members_with_prime_works() { Some(3), MaxMembers::get() )); - assert_eq!(Collective::members(), members.clone()); - assert_eq!(Collective::prime(), Some(3)); + assert_eq!(Members::::get(), members.clone()); + assert_eq!(Prime::::get(), Some(3)); assert_noop!( Collective::set_members(RuntimeOrigin::root(), members, Some(4), MaxMembers::get()), Error::::PrimeAccountNotMember @@ -632,12 +632,12 @@ fn removal_of_old_voters_votes_works() { assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 0, threshold: 3, ayes: vec![1, 2], nays: vec![], end }) ); Collective::change_members_sorted(&[4], &[1], &[2, 3, 4]); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 0, threshold: 3, ayes: vec![2], nays: vec![], end }) ); @@ -653,12 +653,12 @@ fn removal_of_old_voters_votes_works() { assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 1, true)); assert_ok!(Collective::vote(RuntimeOrigin::signed(3), hash, 1, false)); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![3], end }) ); Collective::change_members_sorted(&[], &[3], &[2, 4]); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![], end }) ); }); @@ -680,7 +680,7 @@ fn removal_of_old_voters_votes_works_with_set_members() { assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 0, threshold: 3, ayes: vec![1, 2], nays: vec![], end }) ); assert_ok!(Collective::set_members( @@ -690,7 +690,7 @@ fn removal_of_old_voters_votes_works_with_set_members() { MaxMembers::get() )); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 
0, threshold: 3, ayes: vec![2], nays: vec![], end }) ); @@ -706,7 +706,7 @@ fn removal_of_old_voters_votes_works_with_set_members() { assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 1, true)); assert_ok!(Collective::vote(RuntimeOrigin::signed(3), hash, 1, false)); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![3], end }) ); assert_ok!(Collective::set_members( @@ -716,7 +716,7 @@ fn removal_of_old_voters_votes_works_with_set_members() { MaxMembers::get() )); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![], end }) ); }); @@ -735,10 +735,10 @@ fn propose_works() { Box::new(proposal.clone()), proposal_len )); - assert_eq!(*Collective::proposals(), vec![hash]); - assert_eq!(Collective::proposal_of(&hash), Some(proposal)); + assert_eq!(*Proposals::::get(), vec![hash]); + assert_eq!(ProposalOf::::get(&hash), Some(proposal)); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 0, threshold: 3, ayes: vec![], nays: vec![], end }) ); @@ -898,13 +898,13 @@ fn motions_vote_after_works() { )); // Initially there a no votes when the motion is proposed. assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 0, threshold: 2, ayes: vec![], nays: vec![], end }) ); // Cast first aye vote. assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 0, threshold: 2, ayes: vec![1], nays: vec![], end }) ); // Try to cast a duplicate aye vote. @@ -915,7 +915,7 @@ fn motions_vote_after_works() { // Cast a nay vote. assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, false)); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 0, threshold: 2, ayes: vec![], nays: vec![1], end }) ); // Try to cast a duplicate nay vote. @@ -966,7 +966,7 @@ fn motions_all_first_vote_free_works() { proposal_len, )); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 0, threshold: 2, ayes: vec![], nays: vec![], end }) ); @@ -1031,14 +1031,14 @@ fn motions_reproposing_disapproved_works() { proposal_weight, proposal_len )); - assert_eq!(*Collective::proposals(), vec![]); + assert_eq!(*Proposals::::get(), vec![]); assert_ok!(Collective::propose( RuntimeOrigin::signed(1), 2, Box::new(proposal.clone()), proposal_len )); - assert_eq!(*Collective::proposals(), vec![hash]); + assert_eq!(*Proposals::::get(), vec![hash]); }); } diff --git a/substrate/frame/collective/src/weights.rs b/substrate/frame/collective/src/weights.rs index eece6a006b8f2bda108645e3d8b165a37ad04335..85744b4de9daed455a18b42a16d05933b78d4939 100644 --- a/substrate/frame/collective/src/weights.rs +++ b/substrate/frame/collective/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_collective +//! Autogenerated weights for `pallet_collective` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/collective/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/collective/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_collective. +/// Weight functions needed for `pallet_collective`. pub trait WeightInfo { fn set_members(m: u32, n: u32, p: u32, ) -> Weight; fn execute(b: u32, m: u32, ) -> Weight; @@ -64,30 +63,30 @@ pub trait WeightInfo { fn disapprove_proposal(p: u32, ) -> Weight; } -/// Weights for pallet_collective using the Substrate node and recommended hardware. +/// Weights for `pallet_collective` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Council Members (r:1 w:1) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:0) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:100 w:100) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Prime (r:0 w:1) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Council::Members` (r:1 w:1) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:0) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Voting` (r:100 w:100) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Prime` (r:0 w:1) + /// Proof: `Council::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[0, 100]`. /// The range of component `n` is `[0, 100]`. /// The range of component `p` is `[0, 100]`. fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + m * (3232 ±0) + p * (3190 ±0)` - // Estimated: `15861 + m * (1967 ±24) + p * (4332 ±24)` - // Minimum execution time: 17_506_000 picoseconds. - Weight::from_parts(17_767_000, 15861) - // Standard Error: 60_220 - .saturating_add(Weight::from_parts(4_374_805, 0).saturating_mul(m.into())) - // Standard Error: 60_220 - .saturating_add(Weight::from_parts(8_398_316, 0).saturating_mul(p.into())) + // Estimated: `15894 + m * (1967 ±23) + p * (4332 ±23)` + // Minimum execution time: 15_559_000 picoseconds. 
+ Weight::from_parts(15_870_000, 15894) + // Standard Error: 54_310 + .saturating_add(Weight::from_parts(4_117_753, 0).saturating_mul(m.into())) + // Standard Error: 54_310 + .saturating_add(Weight::from_parts(7_677_627, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -95,245 +94,261 @@ impl WeightInfo for SubstrateWeight { .saturating_add(Weight::from_parts(0, 1967).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 4332).saturating_mul(p.into())) } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `b` is `[2, 1024]`. /// The range of component `m` is `[1, 100]`. fn execute(b: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `202 + m * (32 ±0)` - // Estimated: `1688 + m * (32 ±0)` - // Minimum execution time: 16_203_000 picoseconds. - Weight::from_parts(15_348_267, 1688) - // Standard Error: 37 - .saturating_add(Weight::from_parts(1_766, 0).saturating_mul(b.into())) - // Standard Error: 382 - .saturating_add(Weight::from_parts(15_765, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Measured: `380 + m * (32 ±0)` + // Estimated: `3997 + m * (32 ±0)` + // Minimum execution time: 19_024_000 picoseconds. + Weight::from_parts(18_153_872, 3997) + // Standard Error: 46 + .saturating_add(Weight::from_parts(1_933, 0).saturating_mul(b.into())) + // Standard Error: 478 + .saturating_add(Weight::from_parts(18_872, 0).saturating_mul(m.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:0) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:1 w:0) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `b` is `[2, 1024]`. /// The range of component `m` is `[1, 100]`. fn propose_execute(b: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `202 + m * (32 ±0)` - // Estimated: `3668 + m * (32 ±0)` - // Minimum execution time: 18_642_000 picoseconds. 
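As a rough sanity check of the regenerated `set_members` coefficients above (ref-time component only; the `T::DbWeight` reads/writes and the proof-size part come on top), the formula stays linear in `m` and `p`. A small, self-contained calculation using the new numbers:

```rust
fn main() {
    // New `set_members` coefficients from the weight above, in picoseconds.
    let (base, per_m, per_p) = (15_870_000u64, 4_117_753u64, 7_677_627u64);
    // Worst case allowed by the benchmarked component ranges.
    let (m, p) = (100u64, 100u64);
    let ref_time = base + per_m * m + per_p * p;
    // 15_870_000 + 411_775_300 + 767_762_700 = 1_195_408_000 ps, i.e. about 1.2 ms.
    println!("{ref_time} ps ≈ {:.2} ms", ref_time as f64 / 1e9);
}
```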
- Weight::from_parts(17_708_609, 3668) - // Standard Error: 58 - .saturating_add(Weight::from_parts(2_285, 0).saturating_mul(b.into())) - // Standard Error: 598 - .saturating_add(Weight::from_parts(30_454, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Measured: `380 + m * (32 ±0)` + // Estimated: `3997 + m * (32 ±0)` + // Minimum execution time: 20_895_000 picoseconds. + Weight::from_parts(20_081_761, 3997) + // Standard Error: 57 + .saturating_add(Weight::from_parts(1_982, 0).saturating_mul(b.into())) + // Standard Error: 594 + .saturating_add(Weight::from_parts(32_085, 0).saturating_mul(m.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalCount (r:1 w:1) - /// Proof Skipped: Council ProposalCount (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:0 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:1 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalCount` (r:1 w:1) + /// Proof: `Council::ProposalCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Voting` (r:0 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `b` is `[2, 1024]`. /// The range of component `m` is `[2, 100]`. /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `492 + m * (32 ±0) + p * (36 ±0)` - // Estimated: `3884 + m * (33 ±0) + p * (36 ±0)` - // Minimum execution time: 27_067_000 picoseconds. - Weight::from_parts(25_456_964, 3884) - // Standard Error: 112 - .saturating_add(Weight::from_parts(3_773, 0).saturating_mul(b.into())) - // Standard Error: 1_177 - .saturating_add(Weight::from_parts(32_783, 0).saturating_mul(m.into())) - // Standard Error: 1_162 - .saturating_add(Weight::from_parts(194_388, 0).saturating_mul(p.into())) + // Measured: `525 + m * (32 ±0) + p * (36 ±0)` + // Estimated: `3917 + m * (33 ±0) + p * (36 ±0)` + // Minimum execution time: 22_068_000 picoseconds. 
+ Weight::from_parts(19_639_088, 3917) + // Standard Error: 104 + .saturating_add(Weight::from_parts(3_896, 0).saturating_mul(b.into())) + // Standard Error: 1_095 + .saturating_add(Weight::from_parts(31_634, 0).saturating_mul(m.into())) + // Standard Error: 1_081 + .saturating_add(Weight::from_parts(178_702, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 33).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Voting` (r:1 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `m` is `[5, 100]`. fn vote(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `941 + m * (64 ±0)` - // Estimated: `4405 + m * (64 ±0)` - // Minimum execution time: 26_055_000 picoseconds. - Weight::from_parts(27_251_907, 4405) - // Standard Error: 1_008 - .saturating_add(Weight::from_parts(65_947, 0).saturating_mul(m.into())) + // Measured: `974 + m * (64 ±0)` + // Estimated: `4438 + m * (64 ±0)` + // Minimum execution time: 22_395_000 picoseconds. + Weight::from_parts(23_025_217, 4438) + // Standard Error: 842 + .saturating_add(Weight::from_parts(58_102, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:0 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) + /// Storage: `Council::Voting` (r:1 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:0 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `530 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `3975 + m * (65 ±0) + p * (36 ±0)` - // Minimum execution time: 28_363_000 picoseconds. 
- Weight::from_parts(28_733_464, 3975) - // Standard Error: 1_275 - .saturating_add(Weight::from_parts(43_236, 0).saturating_mul(m.into())) - // Standard Error: 1_244 - .saturating_add(Weight::from_parts(180_187, 0).saturating_mul(p.into())) + // Measured: `563 + m * (64 ±0) + p * (36 ±0)` + // Estimated: `4008 + m * (65 ±0) + p * (36 ±0)` + // Minimum execution time: 24_179_000 picoseconds. + Weight::from_parts(23_846_394, 4008) + // Standard Error: 1_052 + .saturating_add(Weight::from_parts(40_418, 0).saturating_mul(m.into())) + // Standard Error: 1_026 + .saturating_add(Weight::from_parts(171_653, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Council::Voting` (r:1 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:1 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `b` is `[2, 1024]`. /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `832 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `4149 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` - // Minimum execution time: 40_391_000 picoseconds. - Weight::from_parts(42_695_215, 4149) - // Standard Error: 167 - .saturating_add(Weight::from_parts(3_622, 0).saturating_mul(b.into())) - // Standard Error: 1_772 - .saturating_add(Weight::from_parts(33_830, 0).saturating_mul(m.into())) - // Standard Error: 1_727 - .saturating_add(Weight::from_parts(205_374, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) + // Measured: `1010 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` + // Estimated: `4327 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` + // Minimum execution time: 42_129_000 picoseconds. 
+ Weight::from_parts(40_808_957, 4327) + // Standard Error: 134 + .saturating_add(Weight::from_parts(3_579, 0).saturating_mul(b.into())) + // Standard Error: 1_425 + .saturating_add(Weight::from_parts(37_166, 0).saturating_mul(m.into())) + // Standard Error: 1_389 + .saturating_add(Weight::from_parts(200_986, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:0) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:0 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) + /// Storage: `Council::Voting` (r:1 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Prime` (r:1 w:0) + /// Proof: `Council::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:0 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `550 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `3995 + m * (65 ±0) + p * (36 ±0)` - // Minimum execution time: 31_368_000 picoseconds. - Weight::from_parts(32_141_835, 3995) - // Standard Error: 1_451 - .saturating_add(Weight::from_parts(36_372, 0).saturating_mul(m.into())) - // Standard Error: 1_415 - .saturating_add(Weight::from_parts(210_635, 0).saturating_mul(p.into())) + // Measured: `583 + m * (64 ±0) + p * (36 ±0)` + // Estimated: `4028 + m * (65 ±0) + p * (36 ±0)` + // Minimum execution time: 26_385_000 picoseconds. 
+ Weight::from_parts(25_713_839, 4028) + // Standard Error: 1_254 + .saturating_add(Weight::from_parts(36_206, 0).saturating_mul(m.into())) + // Standard Error: 1_223 + .saturating_add(Weight::from_parts(195_114, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:0) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Council::Voting` (r:1 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Prime` (r:1 w:0) + /// Proof: `Council::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:1 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `b` is `[2, 1024]`. /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `852 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `4169 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` - // Minimum execution time: 43_271_000 picoseconds. - Weight::from_parts(45_495_648, 4169) - // Standard Error: 174 - .saturating_add(Weight::from_parts(3_034, 0).saturating_mul(b.into())) - // Standard Error: 1_840 - .saturating_add(Weight::from_parts(42_209, 0).saturating_mul(m.into())) - // Standard Error: 1_793 - .saturating_add(Weight::from_parts(207_525, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(5_u64)) + // Measured: `1030 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` + // Estimated: `4347 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` + // Minimum execution time: 42_903_000 picoseconds. 
+ Weight::from_parts(43_152_907, 4347) + // Standard Error: 146 + .saturating_add(Weight::from_parts(3_459, 0).saturating_mul(b.into())) + // Standard Error: 1_548 + .saturating_add(Weight::from_parts(35_321, 0).saturating_mul(m.into())) + // Standard Error: 1_509 + .saturating_add(Weight::from_parts(202_541, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) } - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:0 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:0 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Voting` (r:0 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:0 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `p` is `[1, 100]`. fn disapprove_proposal(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `359 + p * (32 ±0)` - // Estimated: `1844 + p * (32 ±0)` - // Minimum execution time: 15_170_000 picoseconds. - Weight::from_parts(17_567_243, 1844) - // Standard Error: 1_430 - .saturating_add(Weight::from_parts(169_040, 0).saturating_mul(p.into())) + // Measured: `392 + p * (32 ±0)` + // Estimated: `1877 + p * (32 ±0)` + // Minimum execution time: 12_656_000 picoseconds. + Weight::from_parts(14_032_951, 1877) + // Standard Error: 1_025 + .saturating_add(Weight::from_parts(159_143, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(p.into())) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Council Members (r:1 w:1) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:0) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:100 w:100) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Prime (r:0 w:1) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Council::Members` (r:1 w:1) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:0) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Voting` (r:100 w:100) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Prime` (r:0 w:1) + /// Proof: `Council::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[0, 100]`. /// The range of component `n` is `[0, 100]`. 
/// The range of component `p` is `[0, 100]`. fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + m * (3232 ±0) + p * (3190 ±0)` - // Estimated: `15861 + m * (1967 ±24) + p * (4332 ±24)` - // Minimum execution time: 17_506_000 picoseconds. - Weight::from_parts(17_767_000, 15861) - // Standard Error: 60_220 - .saturating_add(Weight::from_parts(4_374_805, 0).saturating_mul(m.into())) - // Standard Error: 60_220 - .saturating_add(Weight::from_parts(8_398_316, 0).saturating_mul(p.into())) + // Estimated: `15894 + m * (1967 ±23) + p * (4332 ±23)` + // Minimum execution time: 15_559_000 picoseconds. + Weight::from_parts(15_870_000, 15894) + // Standard Error: 54_310 + .saturating_add(Weight::from_parts(4_117_753, 0).saturating_mul(m.into())) + // Standard Error: 54_310 + .saturating_add(Weight::from_parts(7_677_627, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -341,216 +356,232 @@ impl WeightInfo for () { .saturating_add(Weight::from_parts(0, 1967).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 4332).saturating_mul(p.into())) } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `b` is `[2, 1024]`. /// The range of component `m` is `[1, 100]`. fn execute(b: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `202 + m * (32 ±0)` - // Estimated: `1688 + m * (32 ±0)` - // Minimum execution time: 16_203_000 picoseconds. - Weight::from_parts(15_348_267, 1688) - // Standard Error: 37 - .saturating_add(Weight::from_parts(1_766, 0).saturating_mul(b.into())) - // Standard Error: 382 - .saturating_add(Weight::from_parts(15_765, 0).saturating_mul(m.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Measured: `380 + m * (32 ±0)` + // Estimated: `3997 + m * (32 ±0)` + // Minimum execution time: 19_024_000 picoseconds. 
+ Weight::from_parts(18_153_872, 3997) + // Standard Error: 46 + .saturating_add(Weight::from_parts(1_933, 0).saturating_mul(b.into())) + // Standard Error: 478 + .saturating_add(Weight::from_parts(18_872, 0).saturating_mul(m.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:0) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:1 w:0) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `b` is `[2, 1024]`. /// The range of component `m` is `[1, 100]`. fn propose_execute(b: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `202 + m * (32 ±0)` - // Estimated: `3668 + m * (32 ±0)` - // Minimum execution time: 18_642_000 picoseconds. - Weight::from_parts(17_708_609, 3668) - // Standard Error: 58 - .saturating_add(Weight::from_parts(2_285, 0).saturating_mul(b.into())) - // Standard Error: 598 - .saturating_add(Weight::from_parts(30_454, 0).saturating_mul(m.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Measured: `380 + m * (32 ±0)` + // Estimated: `3997 + m * (32 ±0)` + // Minimum execution time: 20_895_000 picoseconds. 
+ Weight::from_parts(20_081_761, 3997) + // Standard Error: 57 + .saturating_add(Weight::from_parts(1_982, 0).saturating_mul(b.into())) + // Standard Error: 594 + .saturating_add(Weight::from_parts(32_085, 0).saturating_mul(m.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalCount (r:1 w:1) - /// Proof Skipped: Council ProposalCount (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:0 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:1 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalCount` (r:1 w:1) + /// Proof: `Council::ProposalCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Voting` (r:0 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `b` is `[2, 1024]`. /// The range of component `m` is `[2, 100]`. /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `492 + m * (32 ±0) + p * (36 ±0)` - // Estimated: `3884 + m * (33 ±0) + p * (36 ±0)` - // Minimum execution time: 27_067_000 picoseconds. - Weight::from_parts(25_456_964, 3884) - // Standard Error: 112 - .saturating_add(Weight::from_parts(3_773, 0).saturating_mul(b.into())) - // Standard Error: 1_177 - .saturating_add(Weight::from_parts(32_783, 0).saturating_mul(m.into())) - // Standard Error: 1_162 - .saturating_add(Weight::from_parts(194_388, 0).saturating_mul(p.into())) + // Measured: `525 + m * (32 ±0) + p * (36 ±0)` + // Estimated: `3917 + m * (33 ±0) + p * (36 ±0)` + // Minimum execution time: 22_068_000 picoseconds. 
+ Weight::from_parts(19_639_088, 3917) + // Standard Error: 104 + .saturating_add(Weight::from_parts(3_896, 0).saturating_mul(b.into())) + // Standard Error: 1_095 + .saturating_add(Weight::from_parts(31_634, 0).saturating_mul(m.into())) + // Standard Error: 1_081 + .saturating_add(Weight::from_parts(178_702, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 33).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Voting` (r:1 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `m` is `[5, 100]`. fn vote(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `941 + m * (64 ±0)` - // Estimated: `4405 + m * (64 ±0)` - // Minimum execution time: 26_055_000 picoseconds. - Weight::from_parts(27_251_907, 4405) - // Standard Error: 1_008 - .saturating_add(Weight::from_parts(65_947, 0).saturating_mul(m.into())) + // Measured: `974 + m * (64 ±0)` + // Estimated: `4438 + m * (64 ±0)` + // Minimum execution time: 22_395_000 picoseconds. + Weight::from_parts(23_025_217, 4438) + // Standard Error: 842 + .saturating_add(Weight::from_parts(58_102, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:0 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) + /// Storage: `Council::Voting` (r:1 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:0 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `530 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `3975 + m * (65 ±0) + p * (36 ±0)` - // Minimum execution time: 28_363_000 picoseconds. 
- Weight::from_parts(28_733_464, 3975) - // Standard Error: 1_275 - .saturating_add(Weight::from_parts(43_236, 0).saturating_mul(m.into())) - // Standard Error: 1_244 - .saturating_add(Weight::from_parts(180_187, 0).saturating_mul(p.into())) + // Measured: `563 + m * (64 ±0) + p * (36 ±0)` + // Estimated: `4008 + m * (65 ±0) + p * (36 ±0)` + // Minimum execution time: 24_179_000 picoseconds. + Weight::from_parts(23_846_394, 4008) + // Standard Error: 1_052 + .saturating_add(Weight::from_parts(40_418, 0).saturating_mul(m.into())) + // Standard Error: 1_026 + .saturating_add(Weight::from_parts(171_653, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Council::Voting` (r:1 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:1 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `b` is `[2, 1024]`. /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `832 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `4149 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` - // Minimum execution time: 40_391_000 picoseconds. - Weight::from_parts(42_695_215, 4149) - // Standard Error: 167 - .saturating_add(Weight::from_parts(3_622, 0).saturating_mul(b.into())) - // Standard Error: 1_772 - .saturating_add(Weight::from_parts(33_830, 0).saturating_mul(m.into())) - // Standard Error: 1_727 - .saturating_add(Weight::from_parts(205_374, 0).saturating_mul(p.into())) - .saturating_add(RocksDbWeight::get().reads(4_u64)) + // Measured: `1010 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` + // Estimated: `4327 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` + // Minimum execution time: 42_129_000 picoseconds. 
+ Weight::from_parts(40_808_957, 4327) + // Standard Error: 134 + .saturating_add(Weight::from_parts(3_579, 0).saturating_mul(b.into())) + // Standard Error: 1_425 + .saturating_add(Weight::from_parts(37_166, 0).saturating_mul(m.into())) + // Standard Error: 1_389 + .saturating_add(Weight::from_parts(200_986, 0).saturating_mul(p.into())) + .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:0) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:0 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) + /// Storage: `Council::Voting` (r:1 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Prime` (r:1 w:0) + /// Proof: `Council::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:0 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `550 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `3995 + m * (65 ±0) + p * (36 ±0)` - // Minimum execution time: 31_368_000 picoseconds. - Weight::from_parts(32_141_835, 3995) - // Standard Error: 1_451 - .saturating_add(Weight::from_parts(36_372, 0).saturating_mul(m.into())) - // Standard Error: 1_415 - .saturating_add(Weight::from_parts(210_635, 0).saturating_mul(p.into())) + // Measured: `583 + m * (64 ±0) + p * (36 ±0)` + // Estimated: `4028 + m * (65 ±0) + p * (36 ±0)` + // Minimum execution time: 26_385_000 picoseconds. 
+ Weight::from_parts(25_713_839, 4028) + // Standard Error: 1_254 + .saturating_add(Weight::from_parts(36_206, 0).saturating_mul(m.into())) + // Standard Error: 1_223 + .saturating_add(Weight::from_parts(195_114, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:0) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Council::Voting` (r:1 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Prime` (r:1 w:0) + /// Proof: `Council::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:1 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `b` is `[2, 1024]`. /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `852 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `4169 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` - // Minimum execution time: 43_271_000 picoseconds. - Weight::from_parts(45_495_648, 4169) - // Standard Error: 174 - .saturating_add(Weight::from_parts(3_034, 0).saturating_mul(b.into())) - // Standard Error: 1_840 - .saturating_add(Weight::from_parts(42_209, 0).saturating_mul(m.into())) - // Standard Error: 1_793 - .saturating_add(Weight::from_parts(207_525, 0).saturating_mul(p.into())) - .saturating_add(RocksDbWeight::get().reads(5_u64)) + // Measured: `1030 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` + // Estimated: `4347 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` + // Minimum execution time: 42_903_000 picoseconds. 
+ Weight::from_parts(43_152_907, 4347) + // Standard Error: 146 + .saturating_add(Weight::from_parts(3_459, 0).saturating_mul(b.into())) + // Standard Error: 1_548 + .saturating_add(Weight::from_parts(35_321, 0).saturating_mul(m.into())) + // Standard Error: 1_509 + .saturating_add(Weight::from_parts(202_541, 0).saturating_mul(p.into())) + .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) } - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:0 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:0 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Voting` (r:0 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:0 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `p` is `[1, 100]`. fn disapprove_proposal(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `359 + p * (32 ±0)` - // Estimated: `1844 + p * (32 ±0)` - // Minimum execution time: 15_170_000 picoseconds. - Weight::from_parts(17_567_243, 1844) - // Standard Error: 1_430 - .saturating_add(Weight::from_parts(169_040, 0).saturating_mul(p.into())) + // Measured: `392 + p * (32 ±0)` + // Estimated: `1877 + p * (32 ±0)` + // Minimum execution time: 12_656_000 picoseconds. 
+ Weight::from_parts(14_032_951, 1877) + // Standard Error: 1_025 + .saturating_add(Weight::from_parts(159_143, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(p.into())) diff --git a/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs index 3f26c6f372ef5d04501332d76856c20437a398e3..3c06131dd6088a2b044b68cfbf3b4a918eb37e3e 100644 --- a/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs +++ b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs @@ -25,7 +25,7 @@ use frame_support::{ traits::{ConstBool, ConstU32, Contains, Randomness}, weights::Weight, }; -use frame_system::pallet_prelude::BlockNumberFor; +use frame_system::{pallet_prelude::BlockNumberFor, EnsureSigned}; use pallet_xcm::BalanceOf; use sp_runtime::{traits::Convert, Perbill}; @@ -90,6 +90,8 @@ impl pallet_contracts::Config for Runtime { type Schedule = Schedule; type Time = super::Timestamp; type UnsafeUnstableInterface = ConstBool; + type UploadOrigin = EnsureSigned; + type InstantiateOrigin = EnsureSigned; type WeightInfo = (); type WeightPrice = Self; type Debug = (); diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index d8939ee88baf9a610d29394bf0c6334a591ef3d6..f941f152ffea220a5e23587ed2eed96056579a16 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -234,7 +234,7 @@ pub mod pallet { use frame_system::pallet_prelude::*; use sp_runtime::Perbill; - /// The current storage version. + /// The in-code storage version. pub(crate) const STORAGE_VERSION: StorageVersion = StorageVersion::new(15); #[pallet::pallet] @@ -379,6 +379,24 @@ pub mod pallet { #[pallet::constant] type MaxDebugBufferLen: Get; + /// Origin allowed to upload code. + /// + /// By default, it is safe to set this to `EnsureSigned`, allowing anyone to upload contract + /// code. + type UploadOrigin: EnsureOrigin; + + /// Origin allowed to instantiate code. + /// + /// # Note + /// + /// This is not enforced when a contract instantiates another contract. The + /// [`Self::UploadOrigin`] should make sure that no code is deployed that does unwanted + /// instantiations. + /// + /// By default, it is safe to set this to `EnsureSigned`, allowing anyone to instantiate + /// contract code. + type InstantiateOrigin: EnsureOrigin; + /// Overarching hold reason. type RuntimeHoldReason: From; @@ -636,7 +654,7 @@ pub mod pallet { determinism: Determinism, ) -> DispatchResult { Migration::::ensure_migrated()?; - let origin = ensure_signed(origin)?; + let origin = T::UploadOrigin::ensure_origin(origin)?; Self::bare_upload_code(origin, code, storage_deposit_limit.map(Into::into), determinism) .map(|_| ()) } @@ -785,11 +803,17 @@ pub mod pallet { salt: Vec, ) -> DispatchResultWithPostInfo { Migration::::ensure_migrated()?; - let origin = ensure_signed(origin)?; + + // These two origins will usually be the same; however, we treat them as separate since + // it is possible for the `Success` value of `UploadOrigin` and `InstantiateOrigin` to + // differ. 
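The `contracts_config.rs` change above wires the two new `pallet_contracts::Config` items (`UploadOrigin`, `InstantiateOrigin`) to `EnsureSigned`, which keeps the previously permissionless `ensure_signed` behaviour. A sketch of the same wiring in a full runtime (the `Runtime` impl shown is illustrative, not taken from this diff; a stricter `EnsureOrigin` could be substituted to gate uploads or instantiations):

```rust
use frame_system::EnsureSigned;

impl pallet_contracts::Config for Runtime {
    // ... the existing associated types stay unchanged ...

    // Any signed account may upload code or instantiate contracts, matching the
    // `ensure_signed` checks these origins replace in the surrounding dispatchable hunks.
    type UploadOrigin = EnsureSigned<Self::AccountId>;
    type InstantiateOrigin = EnsureSigned<Self::AccountId>;
    // ...
}
```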
+ let upload_origin = T::UploadOrigin::ensure_origin(origin.clone())?; + let instantiate_origin = T::InstantiateOrigin::ensure_origin(origin)?; + let code_len = code.len() as u32; let (module, upload_deposit) = Self::try_upload_code( - origin.clone(), + upload_origin, code, storage_deposit_limit.clone().map(Into::into), Determinism::Enforced, @@ -803,7 +827,7 @@ pub mod pallet { let data_len = data.len() as u32; let salt_len = salt.len() as u32; let common = CommonInput { - origin: Origin::from_account_id(origin), + origin: Origin::from_account_id(instantiate_origin), value, data, gas_limit, @@ -844,10 +868,11 @@ pub mod pallet { salt: Vec, ) -> DispatchResultWithPostInfo { Migration::::ensure_migrated()?; + let origin = T::InstantiateOrigin::ensure_origin(origin)?; let data_len = data.len() as u32; let salt_len = salt.len() as u32; let common = CommonInput { - origin: Origin::from_runtime_origin(origin)?, + origin: Origin::from_account_id(origin), value, data, gas_limit, diff --git a/substrate/frame/contracts/src/migration.rs b/substrate/frame/contracts/src/migration.rs index 6d61cb6b1e1af158b9d4942efabc73267f8e44f7..f30ae1ebfadee55eeca000e74b4aac76f93f7551 100644 --- a/substrate/frame/contracts/src/migration.rs +++ b/substrate/frame/contracts/src/migration.rs @@ -263,10 +263,10 @@ impl Migration { impl OnRuntimeUpgrade for Migration { fn on_runtime_upgrade() -> Weight { let name = >::name(); - let current_version = >::current_storage_version(); + let in_code_version = >::in_code_storage_version(); let on_chain_version = >::on_chain_storage_version(); - if on_chain_version == current_version { + if on_chain_version == in_code_version { log::warn!( target: LOG_TARGET, "{name}: No Migration performed storage_version = latest_version = {:?}", @@ -289,7 +289,7 @@ impl OnRuntimeUpgrade for Migration OnRuntimeUpgrade for Migration>::on_chain_storage_version(); - let current_version = >::current_storage_version(); + let in_code_version = >::in_code_storage_version(); - if on_chain_version == current_version { + if on_chain_version == in_code_version { return Ok(Default::default()) } log::debug!( target: LOG_TARGET, - "Requested migration of {} from {:?}(on-chain storage version) to {:?}(current storage version)", - >::name(), on_chain_version, current_version + "Requested migration of {} from {:?}(on-chain storage version) to {:?}(in-code storage version)", + >::name(), on_chain_version, in_code_version ); ensure!( - T::Migrations::is_upgrade_supported(on_chain_version, current_version), - "Unsupported upgrade: VERSION_RANGE should be (on-chain storage version + 1, current storage version)" + T::Migrations::is_upgrade_supported(on_chain_version, in_code_version), + "Unsupported upgrade: VERSION_RANGE should be (on-chain storage version + 1, in-code storage version)" ); Ok(Default::default()) @@ -421,7 +421,7 @@ impl Migration { }, StepResult::Completed { steps_done } => { in_progress_version.put::>(); - if >::current_storage_version() != in_progress_version { + if >::in_code_storage_version() != in_progress_version { log::info!( target: LOG_TARGET, "{name}: Next migration is {:?},", diff --git a/substrate/frame/contracts/src/migration/v09.rs b/substrate/frame/contracts/src/migration/v09.rs index 98fcccc2c0becedccd4bd15eed98b36fd52662ee..f19bff9d6747f8359558e2c2d5a289fad668bfaa 100644 --- a/substrate/frame/contracts/src/migration/v09.rs +++ b/substrate/frame/contracts/src/migration/v09.rs @@ -28,7 +28,7 @@ use frame_support::{pallet_prelude::*, storage_alias, DefaultNoBound, Identity}; use 
sp_runtime::TryRuntimeError; use sp_std::prelude::*; -mod old { +mod v8 { use super::*; #[derive(Encode, Decode)] @@ -50,14 +50,14 @@ mod old { #[cfg(feature = "runtime-benchmarks")] pub fn store_old_dummy_code(len: usize) { use sp_runtime::traits::Hash; - let module = old::PrefabWasmModule { + let module = v8::PrefabWasmModule { instruction_weights_version: 0, initial: 0, maximum: 0, code: vec![42u8; len], }; let hash = T::Hashing::hash(&module.code); - old::CodeStorage::::insert(hash, module); + v8::CodeStorage::::insert(hash, module); } #[derive(Encode, Decode)] @@ -89,9 +89,9 @@ impl MigrationStep for Migration { fn step(&mut self) -> (IsFinished, Weight) { let mut iter = if let Some(last_key) = self.last_code_hash.take() { - old::CodeStorage::::iter_from(old::CodeStorage::::hashed_key_for(last_key)) + v8::CodeStorage::::iter_from(v8::CodeStorage::::hashed_key_for(last_key)) } else { - old::CodeStorage::::iter() + v8::CodeStorage::::iter() }; if let Some((key, old)) = iter.next() { @@ -115,7 +115,7 @@ impl MigrationStep for Migration { #[cfg(feature = "try-runtime")] fn pre_upgrade_step() -> Result, TryRuntimeError> { - let sample: Vec<_> = old::CodeStorage::::iter().take(100).collect(); + let sample: Vec<_> = v8::CodeStorage::::iter().take(100).collect(); log::debug!(target: LOG_TARGET, "Taking sample of {} contract codes", sample.len()); Ok(sample.encode()) @@ -123,7 +123,7 @@ impl MigrationStep for Migration { #[cfg(feature = "try-runtime")] fn post_upgrade_step(state: Vec) -> Result<(), TryRuntimeError> { - let sample = , old::PrefabWasmModule)> as Decode>::decode(&mut &state[..]) + let sample = , v8::PrefabWasmModule)> as Decode>::decode(&mut &state[..]) .expect("pre_upgrade_step provides a valid state; qed"); log::debug!(target: LOG_TARGET, "Validating sample of {} contract codes", sample.len()); diff --git a/substrate/frame/contracts/src/migration/v10.rs b/substrate/frame/contracts/src/migration/v10.rs index d64673aac7d268df80adb955ff3d5c5fd16c5681..1bee86e6a773a9b0d7720438644a4ae84d9bd332 100644 --- a/substrate/frame/contracts/src/migration/v10.rs +++ b/substrate/frame/contracts/src/migration/v10.rs @@ -47,7 +47,7 @@ use sp_runtime::{ }; use sp_std::prelude::*; -mod old { +mod v9 { use super::*; pub type BalanceOf = ( ) where OldCurrency: ReservableCurrency<::AccountId> + 'static, { - let info = old::ContractInfo { + let info = v9::ContractInfo { trie_id: info.trie_id, code_hash: info.code_hash, storage_bytes: Default::default(), @@ -94,7 +94,7 @@ pub fn store_old_contract_info( storage_item_deposit: Default::default(), storage_base_deposit: Default::default(), }; - old::ContractInfoOf::::insert(account, info); + v9::ContractInfoOf::::insert(account, info); } #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebugNoBound, TypeInfo, MaxEncodedLen)] @@ -120,9 +120,9 @@ where pub code_hash: CodeHash, storage_bytes: u32, storage_items: u32, - pub storage_byte_deposit: old::BalanceOf, - storage_item_deposit: old::BalanceOf, - storage_base_deposit: old::BalanceOf, + pub storage_byte_deposit: v9::BalanceOf, + storage_item_deposit: v9::BalanceOf, + storage_base_deposit: v9::BalanceOf, } #[derive(Encode, Decode, MaxEncodedLen, DefaultNoBound)] @@ -152,7 +152,7 @@ fn deposit_address( impl MigrationStep for Migration where OldCurrency: ReservableCurrency<::AccountId> - + Inspect<::AccountId, Balance = old::BalanceOf>, + + Inspect<::AccountId, Balance = v9::BalanceOf>, { const VERSION: u16 = 10; @@ -162,11 +162,11 @@ where fn step(&mut self) -> (IsFinished, Weight) { let mut iter = if 
let Some(last_account) = self.last_account.take() { - old::ContractInfoOf::::iter_from( - old::ContractInfoOf::::hashed_key_for(last_account), + v9::ContractInfoOf::::iter_from( + v9::ContractInfoOf::::hashed_key_for(last_account), ) } else { - old::ContractInfoOf::::iter() + v9::ContractInfoOf::::iter() }; if let Some((account, contract)) = iter.next() { @@ -276,7 +276,7 @@ where #[cfg(feature = "try-runtime")] fn pre_upgrade_step() -> Result, TryRuntimeError> { - let sample: Vec<_> = old::ContractInfoOf::::iter().take(10).collect(); + let sample: Vec<_> = v9::ContractInfoOf::::iter().take(10).collect(); log::debug!(target: LOG_TARGET, "Taking sample of {} contracts", sample.len()); Ok(sample.encode()) @@ -284,7 +284,7 @@ where #[cfg(feature = "try-runtime")] fn post_upgrade_step(state: Vec) -> Result<(), TryRuntimeError> { - let sample = )> as Decode>::decode( + let sample = )> as Decode>::decode( &mut &state[..], ) .expect("pre_upgrade_step provides a valid state; qed"); diff --git a/substrate/frame/contracts/src/migration/v11.rs b/substrate/frame/contracts/src/migration/v11.rs index a5b11f6e08977fbbeb737bc2650ad318ddac97c8..9bfbb25edfb44a8072d2d0073ec4cdb4d27cc360 100644 --- a/substrate/frame/contracts/src/migration/v11.rs +++ b/substrate/frame/contracts/src/migration/v11.rs @@ -29,7 +29,7 @@ use sp_runtime::TryRuntimeError; use codec::{Decode, Encode}; use frame_support::{pallet_prelude::*, storage_alias, DefaultNoBound}; use sp_std::{marker::PhantomData, prelude::*}; -mod old { +mod v10 { use super::*; #[derive(Encode, Decode, TypeInfo, MaxEncodedLen)] @@ -51,11 +51,11 @@ pub struct DeletionQueueManager { #[cfg(any(feature = "runtime-benchmarks", feature = "try-runtime"))] pub fn fill_old_queue(len: usize) { - let queue: Vec = - core::iter::repeat_with(|| old::DeletedContract { trie_id: Default::default() }) + let queue: Vec = + core::iter::repeat_with(|| v10::DeletedContract { trie_id: Default::default() }) .take(len) .collect(); - old::DeletionQueue::::set(Some(queue)); + v10::DeletionQueue::::set(Some(queue)); } #[storage_alias] @@ -80,7 +80,7 @@ impl MigrationStep for Migration { } fn step(&mut self) -> (IsFinished, Weight) { - let Some(old_queue) = old::DeletionQueue::::take() else { + let Some(old_queue) = v10::DeletionQueue::::take() else { return (IsFinished::Yes, Weight::zero()) }; let len = old_queue.len(); @@ -106,7 +106,7 @@ impl MigrationStep for Migration { #[cfg(feature = "try-runtime")] fn pre_upgrade_step() -> Result, TryRuntimeError> { - let old_queue = old::DeletionQueue::::take().unwrap_or_default(); + let old_queue = v10::DeletionQueue::::take().unwrap_or_default(); if old_queue.is_empty() { let len = 10u32; diff --git a/substrate/frame/contracts/src/migration/v12.rs b/substrate/frame/contracts/src/migration/v12.rs index 7dee31503101ba753b0e807d11621be711d4b58b..d9128286df389f84b451660e8d00f4a886b629d4 100644 --- a/substrate/frame/contracts/src/migration/v12.rs +++ b/substrate/frame/contracts/src/migration/v12.rs @@ -34,7 +34,7 @@ use sp_runtime::TryRuntimeError; use sp_runtime::{traits::Zero, FixedPointNumber, FixedU128, Saturating}; use sp_std::prelude::*; -mod old { +mod v11 { use super::*; pub type BalanceOf = , #[codec(compact)] - deposit: old::BalanceOf, + deposit: v11::BalanceOf, #[codec(compact)] refcount: u64, determinism: Determinism, @@ -112,17 +112,17 @@ where let hash = T::Hashing::hash(&code); PristineCode::::insert(hash, code.clone()); - let module = old::PrefabWasmModule { + let module = v11::PrefabWasmModule { instruction_weights_version: 
Default::default(), initial: Default::default(), maximum: Default::default(), code, determinism: Determinism::Enforced, }; - old::CodeStorage::::insert(hash, module); + v11::CodeStorage::::insert(hash, module); - let info = old::OwnerInfo { owner: account, deposit: u32::MAX.into(), refcount: u64::MAX }; - old::OwnerInfoOf::::insert(hash, info); + let info = v11::OwnerInfo { owner: account, deposit: u32::MAX.into(), refcount: u64::MAX }; + v11::OwnerInfoOf::::insert(hash, info); } #[derive(Encode, Decode, MaxEncodedLen, DefaultNoBound)] @@ -148,16 +148,16 @@ where fn step(&mut self) -> (IsFinished, Weight) { let mut iter = if let Some(last_key) = self.last_code_hash.take() { - old::OwnerInfoOf::::iter_from( - old::OwnerInfoOf::::hashed_key_for(last_key), + v11::OwnerInfoOf::::iter_from( + v11::OwnerInfoOf::::hashed_key_for(last_key), ) } else { - old::OwnerInfoOf::::iter() + v11::OwnerInfoOf::::iter() }; if let Some((hash, old_info)) = iter.next() { log::debug!(target: LOG_TARGET, "Migrating OwnerInfo for code_hash {:?}", hash); - let module = old::CodeStorage::::take(hash) + let module = v11::CodeStorage::::take(hash) .expect(format!("No PrefabWasmModule found for code_hash: {:?}", hash).as_str()); let code_len = module.code.len(); @@ -184,7 +184,7 @@ where let bytes_before = module .encoded_size() .saturating_add(code_len) - .saturating_add(old::OwnerInfo::::max_encoded_len()) + .saturating_add(v11::OwnerInfo::::max_encoded_len()) as u32; let items_before = 3u32; let deposit_expected_before = price_per_byte @@ -241,10 +241,10 @@ where fn pre_upgrade_step() -> Result, TryRuntimeError> { let len = 100; log::debug!(target: LOG_TARGET, "Taking sample of {} OwnerInfo(s)", len); - let sample: Vec<_> = old::OwnerInfoOf::::iter() + let sample: Vec<_> = v11::OwnerInfoOf::::iter() .take(len) .map(|(k, v)| { - let module = old::CodeStorage::::get(k) + let module = v11::CodeStorage::::get(k) .expect("No PrefabWasmModule found for code_hash: {:?}"); let info: CodeInfo = CodeInfo { determinism: module.determinism, @@ -258,9 +258,9 @@ where .collect(); let storage: u32 = - old::CodeStorage::::iter().map(|(_k, v)| v.encoded_size() as u32).sum(); - let mut deposit: old::BalanceOf = Default::default(); - old::OwnerInfoOf::::iter().for_each(|(_k, v)| deposit += v.deposit); + v11::CodeStorage::::iter().map(|(_k, v)| v.encoded_size() as u32).sum(); + let mut deposit: v11::BalanceOf = Default::default(); + v11::OwnerInfoOf::::iter().for_each(|(_k, v)| deposit += v.deposit); Ok((sample, deposit, storage).encode()) } @@ -269,7 +269,7 @@ where fn post_upgrade_step(state: Vec) -> Result<(), TryRuntimeError> { let state = <( Vec<(CodeHash, CodeInfo)>, - old::BalanceOf, + v11::BalanceOf, u32, ) as Decode>::decode(&mut &state[..]) .unwrap(); @@ -283,7 +283,7 @@ where ensure!(info.refcount == old.refcount, "invalid refcount"); } - if let Some((k, _)) = old::CodeStorage::::iter().next() { + if let Some((k, _)) = v11::CodeStorage::::iter().next() { log::warn!( target: LOG_TARGET, "CodeStorage is still NOT empty, found code_hash: {:?}", @@ -292,7 +292,7 @@ where } else { log::debug!(target: LOG_TARGET, "CodeStorage is empty."); } - if let Some((k, _)) = old::OwnerInfoOf::::iter().next() { + if let Some((k, _)) = v11::OwnerInfoOf::::iter().next() { log::warn!( target: LOG_TARGET, "OwnerInfoOf is still NOT empty, found code_hash: {:?}", @@ -302,7 +302,7 @@ where log::debug!(target: LOG_TARGET, "OwnerInfoOf is empty."); } - let mut deposit: old::BalanceOf = Default::default(); + let mut deposit: v11::BalanceOf = 
Default::default(); let mut items = 0u32; let mut storage_info = 0u32; CodeInfoOf::::iter().for_each(|(_k, v)| { diff --git a/substrate/frame/contracts/src/migration/v13.rs b/substrate/frame/contracts/src/migration/v13.rs index dd2eb12eb62a5daf92cec8b5910c03f6d81ae0c0..498c44d53abce9730e9833fcbd27e1b2dbb2fe38 100644 --- a/substrate/frame/contracts/src/migration/v13.rs +++ b/substrate/frame/contracts/src/migration/v13.rs @@ -28,7 +28,7 @@ use frame_support::{pallet_prelude::*, storage_alias, DefaultNoBound}; use sp_runtime::BoundedBTreeMap; use sp_std::prelude::*; -mod old { +mod v12 { use super::*; #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] @@ -59,7 +59,7 @@ pub fn store_old_contract_info(account: T::AccountId, info: crate::Co let entropy = (b"contract_depo_v1", account.clone()).using_encoded(T::Hashing::hash); let deposit_account = Decode::decode(&mut TrailingZeroInput::new(entropy.as_ref())) .expect("infinite length input; no invalid inputs for type; qed"); - let info = old::ContractInfo { + let info = v12::ContractInfo { trie_id: info.trie_id.clone(), deposit_account, code_hash: info.code_hash, @@ -69,7 +69,7 @@ pub fn store_old_contract_info(account: T::AccountId, info: crate::Co storage_item_deposit: Default::default(), storage_base_deposit: Default::default(), }; - old::ContractInfoOf::::insert(account, info); + v12::ContractInfoOf::::insert(account, info); } #[storage_alias] @@ -104,11 +104,11 @@ impl MigrationStep for Migration { fn step(&mut self) -> (IsFinished, Weight) { let mut iter = if let Some(last_account) = self.last_account.take() { - old::ContractInfoOf::::iter_from(old::ContractInfoOf::::hashed_key_for( + v12::ContractInfoOf::::iter_from(v12::ContractInfoOf::::hashed_key_for( last_account, )) } else { - old::ContractInfoOf::::iter() + v12::ContractInfoOf::::iter() }; if let Some((key, old)) = iter.next() { diff --git a/substrate/frame/contracts/src/migration/v14.rs b/substrate/frame/contracts/src/migration/v14.rs index 94534d05fdf889d05227149efd57ede584ecb013..09da09e5bcfb1f9ab4396a651e67519a75e09df2 100644 --- a/substrate/frame/contracts/src/migration/v14.rs +++ b/substrate/frame/contracts/src/migration/v14.rs @@ -44,7 +44,7 @@ use sp_runtime::{traits::Zero, Saturating}; #[cfg(feature = "try-runtime")] use sp_std::collections::btree_map::BTreeMap; -mod old { +mod v13 { use super::*; pub type BalanceOf = , #[codec(compact)] - pub deposit: old::BalanceOf, + pub deposit: v13::BalanceOf, #[codec(compact)] pub refcount: u64, pub determinism: Determinism, @@ -86,14 +86,14 @@ where let code = vec![42u8; len as usize]; let hash = T::Hashing::hash(&code); - let info = old::CodeInfo { + let info = v13::CodeInfo { owner: account, deposit: 10_000u32.into(), refcount: u64::MAX, determinism: Determinism::Enforced, code_len: len, }; - old::CodeInfoOf::::insert(hash, info); + v13::CodeInfoOf::::insert(hash, info); } #[cfg(feature = "try-runtime")] @@ -105,9 +105,9 @@ where OldCurrency: ReservableCurrency<::AccountId>, { /// Total reserved balance as code upload deposit for the owner. - reserved: old::BalanceOf, + reserved: v13::BalanceOf, /// Total balance of the owner. 
- total: old::BalanceOf, + total: v13::BalanceOf, } #[derive(Encode, Decode, MaxEncodedLen, DefaultNoBound)] @@ -134,11 +134,11 @@ where fn step(&mut self) -> (IsFinished, Weight) { let mut iter = if let Some(last_hash) = self.last_code_hash.take() { - old::CodeInfoOf::::iter_from( - old::CodeInfoOf::::hashed_key_for(last_hash), + v13::CodeInfoOf::::iter_from( + v13::CodeInfoOf::::hashed_key_for(last_hash), ) } else { - old::CodeInfoOf::::iter() + v13::CodeInfoOf::::iter() }; if let Some((hash, code_info)) = iter.next() { @@ -194,7 +194,7 @@ where #[cfg(feature = "try-runtime")] fn pre_upgrade_step() -> Result, TryRuntimeError> { - let info: Vec<_> = old::CodeInfoOf::::iter().collect(); + let info: Vec<_> = v13::CodeInfoOf::::iter().collect(); let mut owner_balance_allocation = BTreeMap::, BalanceAllocation>::new(); diff --git a/substrate/frame/contracts/src/migration/v15.rs b/substrate/frame/contracts/src/migration/v15.rs index 180fe855ca66728ba18b17555363eeac11a340ba..c77198d6fea0f6b7c305d40b981330cbb50e355d 100644 --- a/substrate/frame/contracts/src/migration/v15.rs +++ b/substrate/frame/contracts/src/migration/v15.rs @@ -46,7 +46,7 @@ use sp_runtime::{traits::Zero, Saturating}; #[cfg(feature = "try-runtime")] use sp_std::vec::Vec; -mod old { +mod v14 { use super::*; #[derive( @@ -81,7 +81,7 @@ pub fn store_old_contract_info(account: T::AccountId, info: crate::Co let entropy = (b"contract_depo_v1", account.clone()).using_encoded(T::Hashing::hash); let deposit_account = Decode::decode(&mut TrailingZeroInput::new(entropy.as_ref())) .expect("infinite length input; no invalid inputs for type; qed"); - let info = old::ContractInfo { + let info = v14::ContractInfo { trie_id: info.trie_id.clone(), deposit_account, code_hash: info.code_hash, @@ -92,7 +92,7 @@ pub fn store_old_contract_info(account: T::AccountId, info: crate::Co storage_base_deposit: info.storage_base_deposit(), delegate_dependencies: info.delegate_dependencies().clone(), }; - old::ContractInfoOf::::insert(account, info); + v14::ContractInfoOf::::insert(account, info); } #[derive(Encode, Decode, CloneNoBound, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] @@ -127,11 +127,11 @@ impl MigrationStep for Migration { fn step(&mut self) -> (IsFinished, Weight) { let mut iter = if let Some(last_account) = self.last_account.take() { - old::ContractInfoOf::::iter_from(old::ContractInfoOf::::hashed_key_for( + v14::ContractInfoOf::::iter_from(v14::ContractInfoOf::::hashed_key_for( last_account, )) } else { - old::ContractInfoOf::::iter() + v14::ContractInfoOf::::iter() }; if let Some((account, old_contract)) = iter.next() { @@ -243,11 +243,11 @@ impl MigrationStep for Migration { #[cfg(feature = "try-runtime")] fn pre_upgrade_step() -> Result, TryRuntimeError> { - let sample: Vec<_> = old::ContractInfoOf::::iter().take(100).collect(); + let sample: Vec<_> = v14::ContractInfoOf::::iter().take(100).collect(); log::debug!(target: LOG_TARGET, "Taking sample of {} contracts", sample.len()); - let state: Vec<(T::AccountId, old::ContractInfo, BalanceOf, BalanceOf)> = sample + let state: Vec<(T::AccountId, v14::ContractInfo, BalanceOf, BalanceOf)> = sample .iter() .map(|(account, contract)| { ( @@ -265,7 +265,7 @@ impl MigrationStep for Migration { #[cfg(feature = "try-runtime")] fn post_upgrade_step(state: Vec) -> Result<(), TryRuntimeError> { let sample = - , BalanceOf, BalanceOf)> as Decode>::decode( + , BalanceOf, BalanceOf)> as Decode>::decode( &mut &state[..], ) .expect("pre_upgrade_step provides a valid state; qed"); diff --git 
a/substrate/frame/contracts/src/storage.rs b/substrate/frame/contracts/src/storage.rs index 3304166607d28af88ebd381e46bfd18f3c3b293c..52c5150ca21750f9558eeb0fdc6fe21bb27ed629 100644 --- a/substrate/frame/contracts/src/storage.rs +++ b/substrate/frame/contracts/src/storage.rs @@ -322,7 +322,8 @@ impl ContractInfo { KillStorageResult::SomeRemaining(_) => return weight_limit, KillStorageResult::AllRemoved(keys_removed) => { entry.remove(); - remaining_key_budget = remaining_key_budget.saturating_sub(keys_removed); + // charge at least one key even if none were removed. + remaining_key_budget = remaining_key_budget.saturating_sub(keys_removed.max(1)); }, }; } diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index 2d1c5b315a34180cd1c007a2d7ebdc5bb6d0b705..2f8f6d4a4377a961ebc4582416268cd22d4b4d34 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -45,6 +45,7 @@ use frame_support::{ assert_err, assert_err_ignore_postinfo, assert_err_with_weight, assert_noop, assert_ok, derive_impl, dispatch::{DispatchErrorWithPostInfo, PostDispatchInfo}, + pallet_prelude::EnsureOrigin, parameter_types, storage::child, traits::{ @@ -434,6 +435,33 @@ impl Contains for TestFilter { } } +parameter_types! { + pub static UploadAccount: Option<::AccountId> = None; + pub static InstantiateAccount: Option<::AccountId> = None; +} + +pub struct EnsureAccount(sp_std::marker::PhantomData<(T, A)>); +impl>>> + EnsureOrigin<::RuntimeOrigin> for EnsureAccount +where + ::AccountId: From, +{ + type Success = T::AccountId; + + fn try_origin(o: T::RuntimeOrigin) -> Result { + let who = as EnsureOrigin<_>>::try_origin(o.clone())?; + if matches!(A::get(), Some(a) if who != a) { + return Err(o) + } + + Ok(who) + } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + Err(()) + } +} parameter_types! { pub static UnstableInterface: bool = true; } @@ -458,6 +486,8 @@ impl Config for Test { type MaxCodeLen = ConstU32<{ 123 * 1024 }>; type MaxStorageKeyLen = ConstU32<128>; type UnsafeUnstableInterface = UnstableInterface; + type UploadOrigin = EnsureAccount; + type InstantiateOrigin = EnsureAccount; type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; type RuntimeHoldReason = RuntimeHoldReason; type Migrations = crate::migration::codegen::BenchMigrations; @@ -5924,7 +5954,106 @@ fn root_cannot_instantiate() { vec![], vec![], ), - DispatchError::RootNotAllowed + DispatchError::BadOrigin + ); + }); +} + +#[test] +fn only_upload_origin_can_upload() { + let (wasm, _) = compile_module::("dummy").unwrap(); + UploadAccount::set(Some(ALICE)); + ExtBuilder::default().build().execute_with(|| { + let _ = Balances::set_balance(&ALICE, 1_000_000); + let _ = Balances::set_balance(&BOB, 1_000_000); + + assert_err!( + Contracts::upload_code( + RuntimeOrigin::root(), + wasm.clone(), + None, + Determinism::Enforced, + ), + DispatchError::BadOrigin + ); + + assert_err!( + Contracts::upload_code( + RuntimeOrigin::signed(BOB), + wasm.clone(), + None, + Determinism::Enforced, + ), + DispatchError::BadOrigin + ); + + // Only alice is allowed to upload contract code. 
+ assert_ok!(Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + wasm.clone(), + None, + Determinism::Enforced, + )); + }); +} + +#[test] +fn only_instantiation_origin_can_instantiate() { + let (code, code_hash) = compile_module::("dummy").unwrap(); + InstantiateAccount::set(Some(ALICE)); + ExtBuilder::default().build().execute_with(|| { + let _ = Balances::set_balance(&ALICE, 1_000_000); + let _ = Balances::set_balance(&BOB, 1_000_000); + + assert_err_ignore_postinfo!( + Contracts::instantiate_with_code( + RuntimeOrigin::root(), + 0, + GAS_LIMIT, + None, + code.clone(), + vec![], + vec![], + ), + DispatchError::BadOrigin + ); + + assert_err_ignore_postinfo!( + Contracts::instantiate_with_code( + RuntimeOrigin::signed(BOB), + 0, + GAS_LIMIT, + None, + code.clone(), + vec![], + vec![], + ), + DispatchError::BadOrigin + ); + + // Only Alice can instantiate + assert_ok!(Contracts::instantiate_with_code( + RuntimeOrigin::signed(ALICE), + 0, + GAS_LIMIT, + None, + code, + vec![], + vec![], + ),); + + // Bob cannot instantiate with either `instantiate_with_code` or `instantiate`. + assert_err_ignore_postinfo!( + Contracts::instantiate( + RuntimeOrigin::signed(BOB), + 0, + GAS_LIMIT, + None, + code_hash, + vec![], + vec![], + ), + DispatchError::BadOrigin ); }); } diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs index c96c28565095592b4609e63b5701d91df2e04786..34b45cba5d2862a6336d72e77bea288c68c76e7c 100644 --- a/substrate/frame/contracts/src/wasm/mod.rs +++ b/substrate/frame/contracts/src/wasm/mod.rs @@ -1821,17 +1821,6 @@ mod tests { assert!(weight_left.all_gt(actual_left), "gas_left must be greater than final"); } - /// Test that [`frame_support::weights::OldWeight`] en/decodes the same as our - /// [`crate::OldWeight`]. - #[test] - fn old_weight_decode() { - #![allow(deprecated)] - let sp = frame_support::weights::OldWeight(42).encode(); - let our = crate::OldWeight::decode(&mut &*sp).unwrap(); - - assert_eq!(our, 42); - } - const CODE_VALUE_TRANSFERRED: &str = r#" (module (import "seal0" "seal_value_transferred" (func $seal_value_transferred (param i32 i32))) diff --git a/substrate/frame/contracts/src/weights.rs b/substrate/frame/contracts/src/weights.rs index 962591290b3149d6b72ea737871b98f38f5c07a3..47ca17c7d800cfe986948ccf10128457e7620162 100644 --- a/substrate/frame/contracts/src/weights.rs +++ b/substrate/frame/contracts/src/weights.rs @@ -17,26 +17,28 @@ //! Autogenerated weights for `pallet_contracts` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-j8vvqcjr-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_contracts +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_contracts -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/contracts/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -141,8 +143,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 1_997_000 picoseconds. - Weight::from_parts(2_130_000, 1627) + // Minimum execution time: 2_103_000 picoseconds. + Weight::from_parts(2_260_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -152,10 +154,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `452 + k * (69 ±0)` // Estimated: `442 + k * (70 ±0)` - // Minimum execution time: 12_276_000 picoseconds. - Weight::from_parts(1_593_881, 442) - // Standard Error: 1_135 - .saturating_add(Weight::from_parts(1_109_302, 0).saturating_mul(k.into())) + // Minimum execution time: 12_173_000 picoseconds. + Weight::from_parts(12_515_000, 442) + // Standard Error: 1_033 + .saturating_add(Weight::from_parts(1_075_765, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -169,8 +171,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `211 + c * (1 ±0)` // Estimated: `6149 + c * (1 ±0)` - // Minimum execution time: 8_176_000 picoseconds. - Weight::from_parts(8_555_388, 6149) + // Minimum execution time: 8_054_000 picoseconds. + Weight::from_parts(8_503_485, 6149) // Standard Error: 1 .saturating_add(Weight::from_parts(1_184, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) @@ -185,8 +187,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 16_270_000 picoseconds. - Weight::from_parts(16_779_000, 6450) + // Minimum execution time: 16_966_000 picoseconds. + Weight::from_parts(17_445_000, 6450) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -199,10 +201,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `171 + k * (1 ±0)` // Estimated: `3635 + k * (1 ±0)` - // Minimum execution time: 3_572_000 picoseconds. - Weight::from_parts(1_950_905, 3635) - // Standard Error: 1_597 - .saturating_add(Weight::from_parts(1_123_190, 0).saturating_mul(k.into())) + // Minimum execution time: 3_643_000 picoseconds. 
+ Weight::from_parts(3_714_000, 3635) + // Standard Error: 1_056 + .saturating_add(Weight::from_parts(1_089_042, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -212,6 +214,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553053f13fd319a03c211337c76e0fe776df` (r:2 w:0) /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:0 w:1) @@ -219,13 +223,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 125952]`. fn v12_migration_step(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `325 + c * (1 ±0)` - // Estimated: `6263 + c * (1 ±0)` - // Minimum execution time: 16_873_000 picoseconds. - Weight::from_parts(16_790_402, 6263) + // Measured: `328 + c * (1 ±0)` + // Estimated: `6266 + c * (1 ±0)` + // Minimum execution time: 19_891_000 picoseconds. + Weight::from_parts(19_961_608, 6266) // Standard Error: 1 - .saturating_add(Weight::from_parts(396, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(Weight::from_parts(429, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } @@ -235,8 +239,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 11_904_000 picoseconds. - Weight::from_parts(12_785_000, 6380) + // Minimum execution time: 12_483_000 picoseconds. + Weight::from_parts(13_205_000, 6380) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -245,13 +249,13 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) fn v14_migration_step() -> Weight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 44_920_000 picoseconds. - Weight::from_parts(46_163_000, 6292) + // Minimum execution time: 42_443_000 picoseconds. + Weight::from_parts(43_420_000, 6292) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -263,8 +267,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 53_864_000 picoseconds. - Weight::from_parts(55_139_000, 6534) + // Minimum execution time: 52_282_000 picoseconds. 
+ Weight::from_parts(54_110_000, 6534) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -274,8 +278,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_375_000 picoseconds. - Weight::from_parts(2_487_000, 1627) + // Minimum execution time: 2_539_000 picoseconds. + Weight::from_parts(2_644_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -287,8 +291,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 11_580_000 picoseconds. - Weight::from_parts(11_980_000, 3631) + // Minimum execution time: 9_473_000 picoseconds. + Weight::from_parts(9_907_000, 3631) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -298,8 +302,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 4_557_000 picoseconds. - Weight::from_parts(4_807_000, 3607) + // Minimum execution time: 3_532_000 picoseconds. + Weight::from_parts(3_768_000, 3607) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -310,8 +314,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 6_253_000 picoseconds. - Weight::from_parts(6_479_000, 3632) + // Minimum execution time: 5_036_000 picoseconds. + Weight::from_parts(5_208_000, 3632) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -322,13 +326,15 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 6_166_000 picoseconds. - Weight::from_parts(6_545_000, 3607) + // Minimum execution time: 4_881_000 picoseconds. + Weight::from_parts(5_149_000, 3607) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -344,22 +350,24 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 125952]`. fn call_with_code_per_byte(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `801 + c * (1 ±0)` - // Estimated: `6739 + c * (1 ±0)` - // Minimum execution time: 282_232_000 picoseconds. - Weight::from_parts(266_148_573, 6739) - // Standard Error: 69 - .saturating_add(Weight::from_parts(34_592, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `804 + c * (1 ±0)` + // Estimated: `9217 + c * (1 ±0)` + // Minimum execution time: 280_047_000 picoseconds. 
+ Weight::from_parts(270_065_882, 9217) + // Standard Error: 86 + .saturating_add(Weight::from_parts(34_361, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:2 w:2) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `System::EventTopics` (r:3 w:3) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::Nonce` (r:1 w:1) @@ -377,17 +385,17 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 1048576]`. fn instantiate_with_code(c: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `323` - // Estimated: `8737` - // Minimum execution time: 3_760_879_000 picoseconds. - Weight::from_parts(794_812_431, 8737) + // Measured: `326` + // Estimated: `8740` + // Minimum execution time: 3_776_071_000 picoseconds. + Weight::from_parts(706_212_213, 8740) // Standard Error: 149 - .saturating_add(Weight::from_parts(101_881, 0).saturating_mul(c.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_404, 0).saturating_mul(i.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_544, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) + .saturating_add(Weight::from_parts(98_798, 0).saturating_mul(c.into())) + // Standard Error: 17 + .saturating_add(Weight::from_parts(1_560, 0).saturating_mul(i.into())) + // Standard Error: 17 + .saturating_add(Weight::from_parts(1_476, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().writes(10_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -396,6 +404,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:1 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::Nonce` (r:1 w:1) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) @@ -407,24 +417,26 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::EventTopics` (r:2 w:2) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: 
`Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `i` is `[0, 1048576]`. /// The range of component `s` is `[0, 1048576]`. fn instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `560` - // Estimated: `6504` - // Minimum execution time: 1_953_162_000 picoseconds. - Weight::from_parts(374_252_840, 6504) + // Measured: `563` + // Estimated: `8982` + // Minimum execution time: 1_966_301_000 picoseconds. + Weight::from_parts(376_287_434, 8982) // Standard Error: 7 - .saturating_add(Weight::from_parts(1_630, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(1_643, 0).saturating_mul(i.into())) // Standard Error: 7 - .saturating_add(Weight::from_parts(1_650, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(10_u64)) + .saturating_add(Weight::from_parts(1_667, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -439,19 +451,21 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) fn call() -> Weight { // Proof Size summary in bytes: - // Measured: `826` - // Estimated: `6766` - // Minimum execution time: 187_899_000 picoseconds. - Weight::from_parts(195_510_000, 6766) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `829` + // Estimated: `9244` + // Minimum execution time: 197_571_000 picoseconds. + Weight::from_parts(206_612_000, 9244) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `System::EventTopics` (r:1 w:1) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) @@ -459,13 +473,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 125952]`. fn upload_code(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 254_800_000 picoseconds. 
- Weight::from_parts(285_603_050, 3607) - // Standard Error: 62 - .saturating_add(Weight::from_parts(66_212, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) + // Measured: `145` + // Estimated: `6085` + // Minimum execution time: 266_006_000 picoseconds. + Weight::from_parts(287_700_788, 6085) + // Standard Error: 67 + .saturating_add(Weight::from_parts(63_196, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -473,7 +487,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `System::EventTopics` (r:1 w:1) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) @@ -482,8 +496,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 43_553_000 picoseconds. - Weight::from_parts(45_036_000, 3780) + // Minimum execution time: 42_714_000 picoseconds. + Weight::from_parts(44_713_000, 3780) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -499,8 +513,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `552` // Estimated: `8967` - // Minimum execution time: 33_223_000 picoseconds. - Weight::from_parts(34_385_000, 8967) + // Minimum execution time: 33_516_000 picoseconds. + Weight::from_parts(34_823_000, 8967) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -508,6 +522,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -521,13 +537,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_caller(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `866 + r * (6 ±0)` - // Estimated: `6806 + r * (6 ±0)` - // Minimum execution time: 254_213_000 picoseconds. - Weight::from_parts(273_464_980, 6806) - // Standard Error: 1_362 - .saturating_add(Weight::from_parts(322_619, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `869 + r * (6 ±0)` + // Estimated: `9284 + r * (6 ±0)` + // Minimum execution time: 246_563_000 picoseconds. 
+ Weight::from_parts(274_999_814, 9284) + // Standard Error: 628 + .saturating_add(Weight::from_parts(347_395, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -535,6 +551,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1601 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -548,13 +566,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_is_contract(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `922 + r * (209 ±0)` - // Estimated: `6826 + r * (2684 ±0)` - // Minimum execution time: 250_273_000 picoseconds. - Weight::from_parts(122_072_782, 6826) - // Standard Error: 5_629 - .saturating_add(Weight::from_parts(3_490_256, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `925 + r * (209 ±0)` + // Estimated: `9304 + r * (2684 ±0)` + // Minimum execution time: 252_236_000 picoseconds. + Weight::from_parts(121_390_081, 9304) + // Standard Error: 6_206 + .saturating_add(Weight::from_parts(3_558_718, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 2684).saturating_mul(r.into())) @@ -563,6 +581,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1601 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -576,13 +596,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `921 + r * (213 ±0)` - // Estimated: `6830 + r * (2688 ±0)` - // Minimum execution time: 255_187_000 picoseconds. - Weight::from_parts(118_082_505, 6830) - // Standard Error: 6_302 - .saturating_add(Weight::from_parts(4_246_968, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `924 + r * (213 ±0)` + // Estimated: `9308 + r * (2688 ±0)` + // Minimum execution time: 269_427_000 picoseconds. 
+ Weight::from_parts(134_879_389, 9308) + // Standard Error: 6_711 + .saturating_add(Weight::from_parts(4_408_658, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 2688).saturating_mul(r.into())) @@ -591,6 +611,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -604,13 +626,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_own_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `873 + r * (6 ±0)` - // Estimated: `6815 + r * (6 ±0)` - // Minimum execution time: 256_833_000 picoseconds. - Weight::from_parts(273_330_216, 6815) - // Standard Error: 881 - .saturating_add(Weight::from_parts(400_105, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `876 + r * (6 ±0)` + // Estimated: `9293 + r * (6 ±0)` + // Minimum execution time: 249_382_000 picoseconds. + Weight::from_parts(266_305_110, 9293) + // Standard Error: 1_592 + .saturating_add(Weight::from_parts(460_001, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -618,6 +640,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -631,18 +655,20 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_origin(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863 + r * (3 ±0)` - // Estimated: `6804 + r * (3 ±0)` - // Minimum execution time: 244_193_000 picoseconds. - Weight::from_parts(271_221_908, 6804) - // Standard Error: 442 - .saturating_add(Weight::from_parts(176_480, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `866 + r * (3 ±0)` + // Estimated: `9282 + r * (3 ±0)` + // Minimum execution time: 252_145_000 picoseconds. 
+ Weight::from_parts(277_211_668, 9282) + // Standard Error: 371 + .saturating_add(Weight::from_parts(174_394, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -656,13 +682,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_root(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `753 + r * (3 ±0)` - // Estimated: `6693 + r * (3 ±0)` - // Minimum execution time: 232_603_000 picoseconds. - Weight::from_parts(260_577_368, 6693) - // Standard Error: 365 - .saturating_add(Weight::from_parts(158_126, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(7_u64)) + // Measured: `756 + r * (3 ±0)` + // Estimated: `9171 + r * (3 ±0)` + // Minimum execution time: 251_595_000 picoseconds. + Weight::from_parts(268_102_575, 9171) + // Standard Error: 334 + .saturating_add(Weight::from_parts(154_836, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } @@ -670,6 +696,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -683,13 +711,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_address(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `867 + r * (6 ±0)` - // Estimated: `6807 + r * (6 ±0)` - // Minimum execution time: 247_564_000 picoseconds. - Weight::from_parts(275_108_914, 6807) - // Standard Error: 505 - .saturating_add(Weight::from_parts(315_065, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `870 + r * (6 ±0)` + // Estimated: `9285 + r * (6 ±0)` + // Minimum execution time: 251_404_000 picoseconds. 
+ Weight::from_parts(278_747_574, 9285) + // Standard Error: 782 + .saturating_add(Weight::from_parts(341_598, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -697,6 +725,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -710,13 +740,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_gas_left(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863 + r * (6 ±0)` - // Estimated: `6806 + r * (6 ±0)` - // Minimum execution time: 258_799_000 picoseconds. - Weight::from_parts(274_338_256, 6806) - // Standard Error: 632 - .saturating_add(Weight::from_parts(355_032, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `866 + r * (6 ±0)` + // Estimated: `9284 + r * (6 ±0)` + // Minimum execution time: 255_649_000 picoseconds. + Weight::from_parts(276_509_641, 9284) + // Standard Error: 1_262 + .saturating_add(Weight::from_parts(380_225, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -724,6 +754,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:2 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -737,13 +769,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1007 + r * (6 ±0)` - // Estimated: `6931 + r * (6 ±0)` - // Minimum execution time: 253_335_000 picoseconds. - Weight::from_parts(273_013_859, 6931) - // Standard Error: 2_007 - .saturating_add(Weight::from_parts(1_540_735, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `1010 + r * (6 ±0)` + // Estimated: `9409 + r * (6 ±0)` + // Minimum execution time: 267_143_000 picoseconds. 
+ Weight::from_parts(298_166_116, 9409) + // Standard Error: 3_765 + .saturating_add(Weight::from_parts(1_620_886, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -751,6 +783,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -764,13 +798,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_value_transferred(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `877 + r * (6 ±0)` - // Estimated: `6823 + r * (6 ±0)` - // Minimum execution time: 252_325_000 picoseconds. - Weight::from_parts(274_733_944, 6823) - // Standard Error: 603 - .saturating_add(Weight::from_parts(314_467, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `880 + r * (6 ±0)` + // Estimated: `9301 + r * (6 ±0)` + // Minimum execution time: 250_013_000 picoseconds. + Weight::from_parts(281_469_583, 9301) + // Standard Error: 730 + .saturating_add(Weight::from_parts(339_260, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -778,6 +812,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -791,13 +827,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_minimum_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `875 + r * (6 ±0)` - // Estimated: `6816 + r * (6 ±0)` - // Minimum execution time: 250_698_000 picoseconds. - Weight::from_parts(271_707_578, 6816) - // Standard Error: 952 - .saturating_add(Weight::from_parts(318_412, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `878 + r * (6 ±0)` + // Estimated: `9294 + r * (6 ±0)` + // Minimum execution time: 256_219_000 picoseconds. 
+ Weight::from_parts(275_309_266, 9294) + // Standard Error: 1_199 + .saturating_add(Weight::from_parts(350_824, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -805,6 +841,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -818,13 +856,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_block_number(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872 + r * (6 ±0)` - // Estimated: `6819 + r * (6 ±0)` - // Minimum execution time: 251_854_000 picoseconds. - Weight::from_parts(272_002_212, 6819) - // Standard Error: 622 - .saturating_add(Weight::from_parts(313_353, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `875 + r * (6 ±0)` + // Estimated: `9297 + r * (6 ±0)` + // Minimum execution time: 264_644_000 picoseconds. + Weight::from_parts(283_856_744, 9297) + // Standard Error: 623 + .saturating_add(Weight::from_parts(334_175, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -832,6 +870,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -845,13 +885,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_now(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863 + r * (6 ±0)` - // Estimated: `6804 + r * (6 ±0)` - // Minimum execution time: 252_010_000 picoseconds. - Weight::from_parts(270_387_000, 6804) - // Standard Error: 659 - .saturating_add(Weight::from_parts(325_856, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `866 + r * (6 ±0)` + // Estimated: `9282 + r * (6 ±0)` + // Minimum execution time: 263_364_000 picoseconds. 
+ Weight::from_parts(281_379_508, 9282) + // Standard Error: 639 + .saturating_add(Weight::from_parts(338_021, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -859,6 +899,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -874,13 +916,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_weight_to_fee(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `937 + r * (14 ±0)` - // Estimated: `6872 + r * (14 ±0)` - // Minimum execution time: 247_933_000 picoseconds. - Weight::from_parts(281_550_162, 6872) - // Standard Error: 660 - .saturating_add(Weight::from_parts(1_090_869, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `940 + r * (14 ±0)` + // Estimated: `9350 + r * (14 ±0)` + // Minimum execution time: 269_667_000 picoseconds. + Weight::from_parts(284_662_802, 9350) + // Standard Error: 691 + .saturating_add(Weight::from_parts(799_249, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 14).saturating_mul(r.into())) } @@ -888,6 +930,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -901,13 +945,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_input(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `865 + r * (6 ±0)` - // Estimated: `6807 + r * (6 ±0)` - // Minimum execution time: 251_158_000 picoseconds. - Weight::from_parts(274_623_152, 6807) - // Standard Error: 491 - .saturating_add(Weight::from_parts(263_916, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `868 + r * (6 ±0)` + // Estimated: `9285 + r * (6 ±0)` + // Minimum execution time: 267_018_000 picoseconds. 
+ Weight::from_parts(281_842_630, 9285) + // Standard Error: 453 + .saturating_add(Weight::from_parts(255_373, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -915,6 +959,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -928,19 +974,21 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1048576]`. fn seal_input_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `869` - // Estimated: `6809` - // Minimum execution time: 263_205_000 picoseconds. - Weight::from_parts(216_792_893, 6809) + // Measured: `872` + // Estimated: `9287` + // Minimum execution time: 258_208_000 picoseconds. + Weight::from_parts(227_686_792, 9287) // Standard Error: 23 .saturating_add(Weight::from_parts(989, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -954,11 +1002,11 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1]`. fn seal_return(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `853 + r * (45 ±0)` - // Estimated: `6793 + r * (45 ±0)` - // Minimum execution time: 239_663_000 picoseconds. - Weight::from_parts(266_124_565, 6793) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `856 + r * (45 ±0)` + // Estimated: `9271 + r * (45 ±0)` + // Minimum execution time: 245_714_000 picoseconds. 
+ Weight::from_parts(272_527_059, 9271) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 45).saturating_mul(r.into())) } @@ -966,6 +1014,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -979,19 +1029,21 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1048576]`. fn seal_return_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863` - // Estimated: `6810` - // Minimum execution time: 241_763_000 picoseconds. - Weight::from_parts(266_535_552, 6810) + // Measured: `866` + // Estimated: `9288` + // Minimum execution time: 256_060_000 picoseconds. + Weight::from_parts(276_710_811, 9288) // Standard Error: 0 - .saturating_add(Weight::from_parts(320, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(312, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:3 w:3) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:1 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:2 w:2) @@ -1005,28 +1057,30 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::EventTopics` (r:4 w:4) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `Contracts::DeletionQueue` (r:0 w:1) /// Proof: `Contracts::DeletionQueue` (`max_values`: None, `max_size`: Some(142), added: 2617, mode: `Measured`) /// The range of component `r` is `[0, 1]`. fn seal_terminate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2972 + r * (316 ±0)` - // Estimated: `8912 + r * (5266 ±0)` - // Minimum execution time: 265_888_000 picoseconds. 
- Weight::from_parts(291_232_232, 8912) - // Standard Error: 845_475 - .saturating_add(Weight::from_parts(104_398_867, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) - .saturating_add(T::DbWeight::get().reads((7_u64).saturating_mul(r.into()))) + // Measured: `2902 + r * (529 ±0)` + // Estimated: `11317 + r * (5479 ±0)` + // Minimum execution time: 270_479_000 picoseconds. + Weight::from_parts(296_706_989, 11317) + // Standard Error: 813_407 + .saturating_add(Weight::from_parts(109_236_910, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) + .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((10_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 5266).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 5479).saturating_mul(r.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1042,13 +1096,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_random(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `944 + r * (10 ±0)` - // Estimated: `6885 + r * (10 ±0)` - // Minimum execution time: 248_500_000 picoseconds. - Weight::from_parts(282_353_053, 6885) - // Standard Error: 1_144 - .saturating_add(Weight::from_parts(1_193_841, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `947 + r * (10 ±0)` + // Estimated: `9363 + r * (10 ±0)` + // Minimum execution time: 251_787_000 picoseconds. + Weight::from_parts(284_036_313, 9363) + // Standard Error: 2_560 + .saturating_add(Weight::from_parts(1_238_301, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) } @@ -1056,6 +1110,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1069,13 +1125,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. 
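The recurring change across these hunks is the new `Parameters::Parameters` (r:3 w:0) storage access, which is why the fixed read counts go up by three (e.g. `reads(8_u64)` becomes `reads(11_u64)`) and the `Estimated` proof size grows accordingly. A sketch of how those I/O terms are layered on top of the execution-time base, using the stock `RocksDbWeight` constants as a stand-in for the runtime's `T::DbWeight`; the read/write counts below are illustrative:

```rust
use frame_support::weights::{constants::RocksDbWeight, Weight};

/// Sketch: adding storage I/O costs on top of an execution-time base.
/// `RocksDbWeight` stands in for the runtime-configured `T::DbWeight`.
fn with_db_costs(base: Weight, r: u32) -> Weight {
    base
        // Fixed reads per call (11 here, matching the bumped counts above).
        .saturating_add(RocksDbWeight::get().reads(11_u64))
        // One additional read per repetition of the component `r`.
        .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into())))
        // Fixed writes per call.
        .saturating_add(RocksDbWeight::get().writes(3_u64))
}
```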
fn seal_deposit_event(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863 + r * (10 ±0)` - // Estimated: `6805 + r * (10 ±0)` - // Minimum execution time: 248_130_000 picoseconds. - Weight::from_parts(279_583_178, 6805) - // Standard Error: 971 - .saturating_add(Weight::from_parts(1_987_941, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `866 + r * (10 ±0)` + // Estimated: `9283 + r * (10 ±0)` + // Minimum execution time: 250_566_000 picoseconds. + Weight::from_parts(275_402_784, 9283) + // Standard Error: 3_939 + .saturating_add(Weight::from_parts(1_941_995, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) } @@ -1083,6 +1139,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1097,15 +1155,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 16384]`. fn seal_deposit_event_per_topic_and_byte(t: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `880 + t * (32 ±0)` - // Estimated: `6825 + t * (2508 ±0)` - // Minimum execution time: 258_594_000 picoseconds. - Weight::from_parts(276_734_422, 6825) - // Standard Error: 102_093 - .saturating_add(Weight::from_parts(2_559_383, 0).saturating_mul(t.into())) + // Measured: `883 + t * (32 ±0)` + // Estimated: `9303 + t * (2508 ±0)` + // Minimum execution time: 267_544_000 picoseconds. 
+ Weight::from_parts(280_117_825, 9303) + // Standard Error: 102_111 + .saturating_add(Weight::from_parts(3_253_038, 0).saturating_mul(t.into())) // Standard Error: 28 - .saturating_add(Weight::from_parts(501, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(668, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into()))) @@ -1115,6 +1173,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1128,13 +1188,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_debug_message(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `862 + r * (7 ±0)` - // Estimated: `6807 + r * (7 ±0)` - // Minimum execution time: 154_564_000 picoseconds. - Weight::from_parts(168_931_365, 6807) - // Standard Error: 349 - .saturating_add(Weight::from_parts(226_848, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `865 + r * (7 ±0)` + // Estimated: `9285 + r * (7 ±0)` + // Minimum execution time: 157_769_000 picoseconds. + Weight::from_parts(173_026_565, 9285) + // Standard Error: 405 + .saturating_add(Weight::from_parts(219_727, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 7).saturating_mul(r.into())) } @@ -1142,6 +1202,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `MaxEncodedLen`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1155,13 +1217,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `i` is `[0, 1048576]`. fn seal_debug_message_per_byte(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `125813` - // Estimated: `131755` - // Minimum execution time: 394_382_000 picoseconds. - Weight::from_parts(376_780_500, 131755) + // Measured: `125816` + // Estimated: `131758` + // Minimum execution time: 403_961_000 picoseconds. 
+ Weight::from_parts(376_480_872, 131758) // Standard Error: 12 - .saturating_add(Weight::from_parts(1_026, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(1_041, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1169,13 +1231,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 800]`. fn seal_set_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `924 + r * (292 ±0)` - // Estimated: `926 + r * (293 ±0)` - // Minimum execution time: 249_757_000 picoseconds. - Weight::from_parts(177_324_374, 926) - // Standard Error: 9_512 - .saturating_add(Weight::from_parts(6_176_717, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `927 + r * (292 ±0)` + // Estimated: `929 + r * (293 ±0)` + // Minimum execution time: 256_272_000 picoseconds. + Weight::from_parts(181_058_379, 929) + // Standard Error: 9_838 + .saturating_add(Weight::from_parts(6_316_677, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -1186,13 +1248,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 16384]`. fn seal_set_storage_per_new_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1447` - // Estimated: `1430` - // Minimum execution time: 267_564_000 picoseconds. - Weight::from_parts(328_701_080, 1430) - // Standard Error: 61 - .saturating_add(Weight::from_parts(576, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(12_u64)) + // Measured: `1450` + // Estimated: `1433` + // Minimum execution time: 268_713_000 picoseconds. + Weight::from_parts(328_329_131, 1433) + // Standard Error: 66 + .saturating_add(Weight::from_parts(962, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(15_u64)) .saturating_add(T::DbWeight::get().writes(8_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1200,13 +1262,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 16384]`. fn seal_set_storage_per_old_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1253 + n * (1 ±0)` - // Estimated: `1253 + n * (1 ±0)` - // Minimum execution time: 266_347_000 picoseconds. - Weight::from_parts(289_824_718, 1253) - // Standard Error: 34 - .saturating_add(Weight::from_parts(184, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `1256 + n * (1 ±0)` + // Estimated: `1256 + n * (1 ±0)` + // Minimum execution time: 265_683_000 picoseconds. + Weight::from_parts(293_784_477, 1256) + // Standard Error: 31 + .saturating_add(Weight::from_parts(377, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1215,13 +1277,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 800]`. fn seal_clear_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `921 + r * (288 ±0)` - // Estimated: `927 + r * (289 ±0)` - // Minimum execution time: 247_207_000 picoseconds. 
- Weight::from_parts(179_856_075, 927) - // Standard Error: 9_383 - .saturating_add(Weight::from_parts(6_053_198, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `924 + r * (288 ±0)` + // Estimated: `930 + r * (289 ±0)` + // Minimum execution time: 269_959_000 picoseconds. + Weight::from_parts(186_065_911, 930) + // Standard Error: 9_679 + .saturating_add(Weight::from_parts(6_286_855, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -1232,13 +1294,11 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 16384]`. fn seal_clear_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1249 + n * (1 ±0)` - // Estimated: `1249 + n * (1 ±0)` - // Minimum execution time: 262_655_000 picoseconds. - Weight::from_parts(289_482_543, 1249) - // Standard Error: 35 - .saturating_add(Weight::from_parts(92, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `1252 + n * (1 ±0)` + // Estimated: `1252 + n * (1 ±0)` + // Minimum execution time: 265_805_000 picoseconds. + Weight::from_parts(294_633_695, 1252) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1247,13 +1307,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 800]`. fn seal_get_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `921 + r * (296 ±0)` - // Estimated: `923 + r * (297 ±0)` - // Minimum execution time: 247_414_000 picoseconds. - Weight::from_parts(203_317_182, 923) - // Standard Error: 7_191 - .saturating_add(Weight::from_parts(4_925_154, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `924 + r * (296 ±0)` + // Estimated: `926 + r * (297 ±0)` + // Minimum execution time: 264_592_000 picoseconds. + Weight::from_parts(204_670_461, 926) + // Standard Error: 6_869 + .saturating_add(Weight::from_parts(5_137_920, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 297).saturating_mul(r.into())) @@ -1263,13 +1323,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 16384]`. fn seal_get_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1265 + n * (1 ±0)` - // Estimated: `1265 + n * (1 ±0)` - // Minimum execution time: 258_910_000 picoseconds. - Weight::from_parts(283_086_514, 1265) - // Standard Error: 39 - .saturating_add(Weight::from_parts(980, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `1268 + n * (1 ±0)` + // Estimated: `1268 + n * (1 ±0)` + // Minimum execution time: 273_165_000 picoseconds. 
+ Weight::from_parts(297_883_669, 1268) + // Standard Error: 37 + .saturating_add(Weight::from_parts(576, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1278,13 +1338,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 800]`. fn seal_contains_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `932 + r * (288 ±0)` - // Estimated: `929 + r * (289 ±0)` - // Minimum execution time: 252_410_000 picoseconds. - Weight::from_parts(201_227_879, 929) - // Standard Error: 6_899 - .saturating_add(Weight::from_parts(4_774_983, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `935 + r * (288 ±0)` + // Estimated: `932 + r * (289 ±0)` + // Minimum execution time: 252_257_000 picoseconds. + Weight::from_parts(205_018_595, 932) + // Standard Error: 7_605 + .saturating_add(Weight::from_parts(4_933_378, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 289).saturating_mul(r.into())) @@ -1294,13 +1354,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 16384]`. fn seal_contains_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1252 + n * (1 ±0)` - // Estimated: `1252 + n * (1 ±0)` - // Minimum execution time: 259_053_000 picoseconds. - Weight::from_parts(283_392_084, 1252) - // Standard Error: 41 - .saturating_add(Weight::from_parts(213, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `1255 + n * (1 ±0)` + // Estimated: `1255 + n * (1 ±0)` + // Minimum execution time: 266_839_000 picoseconds. + Weight::from_parts(292_064_487, 1255) + // Standard Error: 34 + .saturating_add(Weight::from_parts(194, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1309,13 +1369,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 800]`. fn seal_take_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `914 + r * (296 ±0)` - // Estimated: `919 + r * (297 ±0)` - // Minimum execution time: 251_371_000 picoseconds. - Weight::from_parts(177_119_717, 919) - // Standard Error: 9_421 - .saturating_add(Weight::from_parts(6_226_005, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `917 + r * (296 ±0)` + // Estimated: `922 + r * (297 ±0)` + // Minimum execution time: 266_072_000 picoseconds. + Weight::from_parts(189_018_352, 922) + // Standard Error: 9_530 + .saturating_add(Weight::from_parts(6_481_011, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -1326,13 +1386,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 16384]`. 
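The `*_per_byte` variants around here follow the same pattern but regress over the payload length `n` instead of a repetition count, so the slope is a small per-byte `ref_time` cost added on top of the constant base. A sketch with placeholder constants, again not taken from any actual benchmark run:

```rust
use frame_support::weights::Weight;

/// Sketch of a `*_per_byte` weight: constant base plus a per-byte slope in `n`.
/// The numbers are placeholders, not real regression output.
fn per_byte_weight(n: u32) -> Weight {
    Weight::from_parts(290_000_000, 1_269)
        // Per-byte ref_time slope over the component `n`.
        .saturating_add(Weight::from_parts(800, 0).saturating_mul(n.into()))
}
```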
fn seal_take_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1266 + n * (1 ±0)` - // Estimated: `1266 + n * (1 ±0)` - // Minimum execution time: 263_350_000 picoseconds. - Weight::from_parts(284_323_917, 1266) - // Standard Error: 31 - .saturating_add(Weight::from_parts(921, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `1269 + n * (1 ±0)` + // Estimated: `1269 + n * (1 ±0)` + // Minimum execution time: 270_703_000 picoseconds. + Weight::from_parts(292_867_105, 1269) + // Standard Error: 30 + .saturating_add(Weight::from_parts(800, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1340,6 +1400,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1602 w:1601) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1353,13 +1415,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_transfer(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1415 + r * (45 ±0)` - // Estimated: `7307 + r * (2520 ±0)` - // Minimum execution time: 248_701_000 picoseconds. - Weight::from_parts(17_811_969, 7307) - // Standard Error: 35_154 - .saturating_add(Weight::from_parts(31_809_738, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `1418 + r * (45 ±0)` + // Estimated: `9785 + r * (2520 ±0)` + // Minimum execution time: 251_843_000 picoseconds. + Weight::from_parts(181_026_888, 9785) + // Standard Error: 38_221 + .saturating_add(Weight::from_parts(30_626_681, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -1369,6 +1431,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:801 w:801) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:2 w:0) @@ -1382,13 +1446,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 800]`. 
fn seal_call(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1260 + r * (245 ±0)` - // Estimated: `9440 + r * (2721 ±0)` - // Minimum execution time: 247_335_000 picoseconds. - Weight::from_parts(264_025_000, 9440) - // Standard Error: 121_299 - .saturating_add(Weight::from_parts(234_770_827, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) + // Measured: `1263 + r * (245 ±0)` + // Estimated: `9635 + r * (2721 ±0)` + // Minimum execution time: 254_881_000 picoseconds. + Weight::from_parts(272_871_000, 9635) + // Standard Error: 94_527 + .saturating_add(Weight::from_parts(233_379_497, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(r.into()))) @@ -1398,6 +1462,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:736 w:0) @@ -1412,12 +1478,12 @@ impl WeightInfo for SubstrateWeight { fn seal_delegate_call(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + r * (576 ±0)` - // Estimated: `6812 + r * (2637 ±3)` - // Minimum execution time: 261_011_000 picoseconds. - Weight::from_parts(264_554_000, 6812) - // Standard Error: 104_415 - .saturating_add(Weight::from_parts(231_627_084, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Estimated: `9290 + r * (2637 ±3)` + // Minimum execution time: 266_704_000 picoseconds. + Weight::from_parts(273_165_000, 9290) + // Standard Error: 141_486 + .saturating_add(Weight::from_parts(232_947_049, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -1427,6 +1493,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:3 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:2 w:2) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:2 w:0) @@ -1441,15 +1509,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 1048576]`. 
fn seal_call_per_transfer_clone_byte(t: u32, c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1307 + t * (277 ±0)` - // Estimated: `12197 + t * (5227 ±0)` - // Minimum execution time: 445_561_000 picoseconds. - Weight::from_parts(62_287_490, 12197) - // Standard Error: 11_797_697 - .saturating_add(Weight::from_parts(357_530_529, 0).saturating_mul(t.into())) + // Measured: `1310 + t * (277 ±0)` + // Estimated: `12200 + t * (5227 ±0)` + // Minimum execution time: 446_451_000 picoseconds. + Weight::from_parts(68_175_222, 12200) + // Standard Error: 11_619_250 + .saturating_add(Weight::from_parts(354_237_260, 0).saturating_mul(t.into())) // Standard Error: 17 - .saturating_add(Weight::from_parts(970, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(13_u64)) + .saturating_add(Weight::from_parts(987, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(16_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(t.into()))) @@ -1459,6 +1527,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:802 w:802) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:801 w:801) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:801 w:800) @@ -1472,17 +1542,17 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::EventTopics` (r:803 w:803) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:800 w:800) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `r` is `[1, 800]`. fn seal_instantiate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1278 + r * (255 ±0)` - // Estimated: `9620 + r * (2731 ±0)` - // Minimum execution time: 621_897_000 picoseconds. - Weight::from_parts(631_687_000, 9620) - // Standard Error: 215_241 - .saturating_add(Weight::from_parts(350_527_831, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) + // Measured: `1281 + r * (255 ±0)` + // Estimated: `9623 + r * (2731 ±0)` + // Minimum execution time: 628_156_000 picoseconds. 
+ Weight::from_parts(642_052_000, 9623) + // Standard Error: 223_957 + .saturating_add(Weight::from_parts(348_660_264, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(7_u64)) .saturating_add(T::DbWeight::get().writes((5_u64).saturating_mul(r.into()))) @@ -1492,6 +1562,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:3 w:3) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:2 w:2) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:2 w:1) @@ -1505,23 +1577,23 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::EventTopics` (r:4 w:4) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `t` is `[0, 1]`. /// The range of component `i` is `[0, 983040]`. /// The range of component `s` is `[0, 983040]`. fn seal_instantiate_per_transfer_input_salt_byte(t: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1303 + t * (104 ±0)` - // Estimated: `12211 + t * (2549 ±1)` - // Minimum execution time: 2_181_184_000 picoseconds. - Weight::from_parts(1_194_190_111, 12211) - // Standard Error: 11_578_766 - .saturating_add(Weight::from_parts(6_361_884, 0).saturating_mul(t.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_025, 0).saturating_mul(i.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_158, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(16_u64)) + // Measured: `1306 + t * (104 ±0)` + // Estimated: `12214 + t * (2549 ±1)` + // Minimum execution time: 2_106_379_000 picoseconds. 
+ Weight::from_parts(1_098_227_098, 12214) + // Standard Error: 12_549_729 + .saturating_add(Weight::from_parts(21_628_382, 0).saturating_mul(t.into())) + // Standard Error: 19 + .saturating_add(Weight::from_parts(1_059, 0).saturating_mul(i.into())) + // Standard Error: 19 + .saturating_add(Weight::from_parts(1_220, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(19_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(11_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into()))) @@ -1531,6 +1603,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1544,13 +1618,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_hash_sha2_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `862 + r * (8 ±0)` - // Estimated: `6801 + r * (8 ±0)` - // Minimum execution time: 241_609_000 picoseconds. - Weight::from_parts(268_716_874, 6801) - // Standard Error: 617 - .saturating_add(Weight::from_parts(377_753, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `865 + r * (8 ±0)` + // Estimated: `9279 + r * (8 ±0)` + // Minimum execution time: 257_102_000 picoseconds. + Weight::from_parts(278_920_692, 9279) + // Standard Error: 464 + .saturating_add(Weight::from_parts(374_120, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } @@ -1558,6 +1632,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1571,19 +1647,21 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1048576]`. fn seal_hash_sha2_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `870` - // Estimated: `6808` - // Minimum execution time: 261_296_000 picoseconds. - Weight::from_parts(255_531_654, 6808) + // Measured: `873` + // Estimated: `9286` + // Minimum execution time: 266_609_000 picoseconds. 
+ Weight::from_parts(263_034_132, 9286) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_081, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(1_082, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1597,13 +1675,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `864 + r * (8 ±0)` - // Estimated: `6806 + r * (8 ±0)` - // Minimum execution time: 243_583_000 picoseconds. - Weight::from_parts(270_025_058, 6806) - // Standard Error: 560 - .saturating_add(Weight::from_parts(767_519, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `867 + r * (8 ±0)` + // Estimated: `9284 + r * (8 ±0)` + // Minimum execution time: 246_275_000 picoseconds. + Weight::from_parts(279_091_594, 9284) + // Standard Error: 887 + .saturating_add(Weight::from_parts(810_394, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } @@ -1611,6 +1689,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1624,19 +1704,21 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1048576]`. fn seal_hash_keccak_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872` - // Estimated: `6814` - // Minimum execution time: 253_798_000 picoseconds. - Weight::from_parts(265_542_351, 6814) + // Measured: `875` + // Estimated: `9292` + // Minimum execution time: 249_254_000 picoseconds. 
+ Weight::from_parts(270_833_823, 9292) // Standard Error: 0 - .saturating_add(Weight::from_parts(3_343, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(3_347, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1650,13 +1732,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `864 + r * (8 ±0)` - // Estimated: `6808 + r * (8 ±0)` - // Minimum execution time: 247_332_000 picoseconds. - Weight::from_parts(269_183_656, 6808) - // Standard Error: 665 - .saturating_add(Weight::from_parts(443_386, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `867 + r * (8 ±0)` + // Estimated: `9286 + r * (8 ±0)` + // Minimum execution time: 259_131_000 picoseconds. + Weight::from_parts(272_665_020, 9286) + // Standard Error: 862 + .saturating_add(Weight::from_parts(451_247, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } @@ -1664,6 +1746,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1677,19 +1761,21 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872` - // Estimated: `6813` - // Minimum execution time: 250_855_000 picoseconds. - Weight::from_parts(258_752_975, 6813) + // Measured: `875` + // Estimated: `9291` + // Minimum execution time: 276_465_000 picoseconds. 
+ Weight::from_parts(269_450_297, 9291) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_202, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(1_190, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1703,13 +1789,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_hash_blake2_128(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `864 + r * (8 ±0)` - // Estimated: `6805 + r * (8 ±0)` - // Minimum execution time: 240_733_000 picoseconds. - Weight::from_parts(269_134_358, 6805) - // Standard Error: 512 - .saturating_add(Weight::from_parts(440_043, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `867 + r * (8 ±0)` + // Estimated: `9283 + r * (8 ±0)` + // Minimum execution time: 248_349_000 picoseconds. + Weight::from_parts(273_660_686, 9283) + // Standard Error: 656 + .saturating_add(Weight::from_parts(444_293, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } @@ -1717,6 +1803,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1730,19 +1818,21 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_128_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872` - // Estimated: `6811` - // Minimum execution time: 247_377_000 picoseconds. - Weight::from_parts(261_077_322, 6811) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_195, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `875` + // Estimated: `9289` + // Minimum execution time: 252_107_000 picoseconds. 
+ Weight::from_parts(267_427_726, 9289) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_199, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1756,13 +1846,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 125697]`. fn seal_sr25519_verify_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `997 + n * (1 ±0)` - // Estimated: `6934 + n * (1 ±0)` - // Minimum execution time: 307_337_000 picoseconds. - Weight::from_parts(326_710_473, 6934) - // Standard Error: 9 - .saturating_add(Weight::from_parts(5_765, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `1000 + n * (1 ±0)` + // Estimated: `9412 + n * (1 ±0)` + // Minimum execution time: 311_252_000 picoseconds. + Weight::from_parts(333_250_773, 9412) + // Standard Error: 12 + .saturating_add(Weight::from_parts(5_668, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1770,6 +1860,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1783,13 +1875,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 160]`. fn seal_sr25519_verify(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `805 + r * (112 ±0)` - // Estimated: `6748 + r * (112 ±0)` - // Minimum execution time: 245_432_000 picoseconds. - Weight::from_parts(294_206_377, 6748) - // Standard Error: 7_229 - .saturating_add(Weight::from_parts(41_480_485, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `810 + r * (112 ±0)` + // Estimated: `9226 + r * (112 ±0)` + // Minimum execution time: 267_431_000 picoseconds. 
+ Weight::from_parts(300_733_048, 9226) + // Standard Error: 8_806 + .saturating_add(Weight::from_parts(42_169_197, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(r.into())) } @@ -1797,6 +1889,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1810,13 +1904,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `907 + r * (76 ±0)` - // Estimated: `6802 + r * (77 ±0)` - // Minimum execution time: 247_788_000 picoseconds. - Weight::from_parts(303_940_062, 6802) - // Standard Error: 10_671 - .saturating_add(Weight::from_parts(45_730_772, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `910 + r * (76 ±0)` + // Estimated: `9279 + r * (77 ±0)` + // Minimum execution time: 265_905_000 picoseconds. + Weight::from_parts(305_381_951, 9279) + // Standard Error: 9_863 + .saturating_add(Weight::from_parts(45_771_182, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 77).saturating_mul(r.into())) } @@ -1824,6 +1918,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1837,13 +1933,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `877 + r * (42 ±0)` - // Estimated: `6816 + r * (42 ±0)` - // Minimum execution time: 248_825_000 picoseconds. - Weight::from_parts(286_832_225, 6816) - // Standard Error: 5_274 - .saturating_add(Weight::from_parts(11_889_262, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `880 + r * (42 ±0)` + // Estimated: `9294 + r * (42 ±0)` + // Minimum execution time: 266_701_000 picoseconds. 
+ Weight::from_parts(292_407_888, 9294) + // Standard Error: 5_394 + .saturating_add(Weight::from_parts(11_896_065, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 42).saturating_mul(r.into())) } @@ -1851,6 +1947,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1536 w:1536) @@ -1865,12 +1963,12 @@ impl WeightInfo for SubstrateWeight { fn seal_set_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + r * (965 ±0)` - // Estimated: `6807 + r * (3090 ±7)` - // Minimum execution time: 244_982_000 picoseconds. - Weight::from_parts(265_297_000, 6807) - // Standard Error: 39_895 - .saturating_add(Weight::from_parts(22_435_888, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Estimated: `9285 + r * (3090 ±7)` + // Minimum execution time: 268_755_000 picoseconds. + Weight::from_parts(272_300_000, 9285) + // Standard Error: 43_747 + .saturating_add(Weight::from_parts(25_001_889, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(r.into()))) @@ -1880,6 +1978,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:33 w:32) @@ -1893,22 +1993,24 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 32]`. fn lock_delegate_dependency(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `928 + r * (131 ±0)` - // Estimated: `6878 + r * (2606 ±0)` - // Minimum execution time: 246_455_000 picoseconds. - Weight::from_parts(275_334_919, 6878) - // Standard Error: 20_911 - .saturating_add(Weight::from_parts(6_427_525, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `937 + r * (131 ±0)` + // Estimated: `9346 + r * (2607 ±0)` + // Minimum execution time: 255_190_000 picoseconds. 
+ Weight::from_parts(285_643_922, 9346) + // Standard Error: 23_582 + .saturating_add(Weight::from_parts(6_289_940, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2606).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 2607).saturating_mul(r.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `MaxEncodedLen`) /// Storage: `Contracts::CodeInfoOf` (r:33 w:32) @@ -1922,13 +2024,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 32]`. fn unlock_delegate_dependency(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `969 + r * (183 ±0)` + // Measured: `972 + r * (184 ±0)` // Estimated: `129453 + r * (2568 ±0)` - // Minimum execution time: 254_472_000 picoseconds. - Weight::from_parts(280_657_909, 129453) - // Standard Error: 20_131 - .saturating_add(Weight::from_parts(5_644_006, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Minimum execution time: 260_061_000 picoseconds. + Weight::from_parts(286_849_343, 129453) + // Standard Error: 22_406 + .saturating_add(Weight::from_parts(5_661_459, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -1938,6 +2040,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1951,13 +2055,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_reentrance_count(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `858 + r * (3 ±0)` - // Estimated: `6804 + r * (3 ±0)` - // Minimum execution time: 250_535_000 picoseconds. 
- Weight::from_parts(270_318_376, 6804) - // Standard Error: 386 - .saturating_add(Weight::from_parts(174_627, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `861 + r * (3 ±0)` + // Estimated: `9282 + r * (3 ±0)` + // Minimum execution time: 250_922_000 picoseconds. + Weight::from_parts(277_294_913, 9282) + // Standard Error: 391 + .saturating_add(Weight::from_parts(169_457, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } @@ -1965,6 +2069,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1978,13 +2084,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_account_reentrance_count(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2109 + r * (39 ±0)` - // Estimated: `7899 + r * (40 ±0)` - // Minimum execution time: 248_174_000 picoseconds. - Weight::from_parts(301_826_520, 7899) - // Standard Error: 801 - .saturating_add(Weight::from_parts(248_479, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `2112 + r * (39 ±0)` + // Estimated: `10377 + r * (40 ±0)` + // Minimum execution time: 267_293_000 picoseconds. + Weight::from_parts(309_125_635, 10377) + // Standard Error: 637 + .saturating_add(Weight::from_parts(245_508, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 40).saturating_mul(r.into())) } @@ -1992,6 +2098,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2007,13 +2115,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_instantiation_nonce(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `861 + r * (3 ±0)` - // Estimated: `6801 + r * (3 ±0)` - // Minimum execution time: 246_540_000 picoseconds. 
- Weight::from_parts(268_913_509, 6801) - // Standard Error: 378 - .saturating_add(Weight::from_parts(154_950, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `864 + r * (3 ±0)` + // Estimated: `9279 + r * (3 ±0)` + // Minimum execution time: 256_057_000 picoseconds. + Weight::from_parts(278_068_870, 9279) + // Standard Error: 335 + .saturating_add(Weight::from_parts(152_083, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } @@ -2022,10 +2130,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_777_000 picoseconds. - Weight::from_parts(1_707_601, 0) - // Standard Error: 14 - .saturating_add(Weight::from_parts(15_392, 0).saturating_mul(r.into())) + // Minimum execution time: 1_846_000 picoseconds. + Weight::from_parts(1_808_029, 0) + // Standard Error: 9 + .saturating_add(Weight::from_parts(14_937, 0).saturating_mul(r.into())) } } @@ -2037,8 +2145,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 1_997_000 picoseconds. - Weight::from_parts(2_130_000, 1627) + // Minimum execution time: 2_103_000 picoseconds. + Weight::from_parts(2_260_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -2048,10 +2156,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `452 + k * (69 ±0)` // Estimated: `442 + k * (70 ±0)` - // Minimum execution time: 12_276_000 picoseconds. - Weight::from_parts(1_593_881, 442) - // Standard Error: 1_135 - .saturating_add(Weight::from_parts(1_109_302, 0).saturating_mul(k.into())) + // Minimum execution time: 12_173_000 picoseconds. + Weight::from_parts(12_515_000, 442) + // Standard Error: 1_033 + .saturating_add(Weight::from_parts(1_075_765, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -2065,8 +2173,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `211 + c * (1 ±0)` // Estimated: `6149 + c * (1 ±0)` - // Minimum execution time: 8_176_000 picoseconds. - Weight::from_parts(8_555_388, 6149) + // Minimum execution time: 8_054_000 picoseconds. + Weight::from_parts(8_503_485, 6149) // Standard Error: 1 .saturating_add(Weight::from_parts(1_184, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) @@ -2081,8 +2189,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 16_270_000 picoseconds. - Weight::from_parts(16_779_000, 6450) + // Minimum execution time: 16_966_000 picoseconds. + Weight::from_parts(17_445_000, 6450) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2095,10 +2203,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `171 + k * (1 ±0)` // Estimated: `3635 + k * (1 ±0)` - // Minimum execution time: 3_572_000 picoseconds. - Weight::from_parts(1_950_905, 3635) - // Standard Error: 1_597 - .saturating_add(Weight::from_parts(1_123_190, 0).saturating_mul(k.into())) + // Minimum execution time: 3_643_000 picoseconds. 
+ Weight::from_parts(3_714_000, 3635) + // Standard Error: 1_056 + .saturating_add(Weight::from_parts(1_089_042, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -2108,6 +2216,8 @@ impl WeightInfo for () { /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553053f13fd319a03c211337c76e0fe776df` (r:2 w:0) /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:0 w:1) @@ -2115,13 +2225,13 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 125952]`. fn v12_migration_step(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `325 + c * (1 ±0)` - // Estimated: `6263 + c * (1 ±0)` - // Minimum execution time: 16_873_000 picoseconds. - Weight::from_parts(16_790_402, 6263) + // Measured: `328 + c * (1 ±0)` + // Estimated: `6266 + c * (1 ±0)` + // Minimum execution time: 19_891_000 picoseconds. + Weight::from_parts(19_961_608, 6266) // Standard Error: 1 - .saturating_add(Weight::from_parts(396, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(Weight::from_parts(429, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } @@ -2131,8 +2241,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 11_904_000 picoseconds. - Weight::from_parts(12_785_000, 6380) + // Minimum execution time: 12_483_000 picoseconds. + Weight::from_parts(13_205_000, 6380) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2141,13 +2251,13 @@ impl WeightInfo for () { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) fn v14_migration_step() -> Weight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 44_920_000 picoseconds. - Weight::from_parts(46_163_000, 6292) + // Minimum execution time: 42_443_000 picoseconds. + Weight::from_parts(43_420_000, 6292) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2159,8 +2269,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 53_864_000 picoseconds. - Weight::from_parts(55_139_000, 6534) + // Minimum execution time: 52_282_000 picoseconds. 
+ Weight::from_parts(54_110_000, 6534) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -2170,8 +2280,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_375_000 picoseconds. - Weight::from_parts(2_487_000, 1627) + // Minimum execution time: 2_539_000 picoseconds. + Weight::from_parts(2_644_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2183,8 +2293,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 11_580_000 picoseconds. - Weight::from_parts(11_980_000, 3631) + // Minimum execution time: 9_473_000 picoseconds. + Weight::from_parts(9_907_000, 3631) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -2194,8 +2304,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 4_557_000 picoseconds. - Weight::from_parts(4_807_000, 3607) + // Minimum execution time: 3_532_000 picoseconds. + Weight::from_parts(3_768_000, 3607) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -2206,8 +2316,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 6_253_000 picoseconds. - Weight::from_parts(6_479_000, 3632) + // Minimum execution time: 5_036_000 picoseconds. + Weight::from_parts(5_208_000, 3632) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -2218,13 +2328,15 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 6_166_000 picoseconds. - Weight::from_parts(6_545_000, 3607) + // Minimum execution time: 4_881_000 picoseconds. + Weight::from_parts(5_149_000, 3607) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2240,22 +2352,24 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 125952]`. fn call_with_code_per_byte(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `801 + c * (1 ±0)` - // Estimated: `6739 + c * (1 ±0)` - // Minimum execution time: 282_232_000 picoseconds. - Weight::from_parts(266_148_573, 6739) - // Standard Error: 69 - .saturating_add(Weight::from_parts(34_592, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `804 + c * (1 ±0)` + // Estimated: `9217 + c * (1 ±0)` + // Minimum execution time: 280_047_000 picoseconds. 
+ Weight::from_parts(270_065_882, 9217) + // Standard Error: 86 + .saturating_add(Weight::from_parts(34_361, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:2 w:2) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `System::EventTopics` (r:3 w:3) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::Nonce` (r:1 w:1) @@ -2273,17 +2387,17 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 1048576]`. fn instantiate_with_code(c: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `323` - // Estimated: `8737` - // Minimum execution time: 3_760_879_000 picoseconds. - Weight::from_parts(794_812_431, 8737) + // Measured: `326` + // Estimated: `8740` + // Minimum execution time: 3_776_071_000 picoseconds. + Weight::from_parts(706_212_213, 8740) // Standard Error: 149 - .saturating_add(Weight::from_parts(101_881, 0).saturating_mul(c.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_404, 0).saturating_mul(i.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_544, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + .saturating_add(Weight::from_parts(98_798, 0).saturating_mul(c.into())) + // Standard Error: 17 + .saturating_add(Weight::from_parts(1_560, 0).saturating_mul(i.into())) + // Standard Error: 17 + .saturating_add(Weight::from_parts(1_476, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().writes(10_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -2292,6 +2406,8 @@ impl WeightInfo for () { /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:1 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::Nonce` (r:1 w:1) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) @@ -2303,24 +2419,26 @@ impl WeightInfo for () { /// Storage: `System::EventTopics` (r:2 w:2) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: 
None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `i` is `[0, 1048576]`. /// The range of component `s` is `[0, 1048576]`. fn instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `560` - // Estimated: `6504` - // Minimum execution time: 1_953_162_000 picoseconds. - Weight::from_parts(374_252_840, 6504) + // Measured: `563` + // Estimated: `8982` + // Minimum execution time: 1_966_301_000 picoseconds. + Weight::from_parts(376_287_434, 8982) // Standard Error: 7 - .saturating_add(Weight::from_parts(1_630, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(1_643, 0).saturating_mul(i.into())) // Standard Error: 7 - .saturating_add(Weight::from_parts(1_650, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(10_u64)) + .saturating_add(Weight::from_parts(1_667, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2335,19 +2453,21 @@ impl WeightInfo for () { /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) fn call() -> Weight { // Proof Size summary in bytes: - // Measured: `826` - // Estimated: `6766` - // Minimum execution time: 187_899_000 picoseconds. - Weight::from_parts(195_510_000, 6766) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `829` + // Estimated: `9244` + // Minimum execution time: 197_571_000 picoseconds. + Weight::from_parts(206_612_000, 9244) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `System::EventTopics` (r:1 w:1) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) @@ -2355,13 +2475,13 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 125952]`. fn upload_code(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 254_800_000 picoseconds. 
- Weight::from_parts(285_603_050, 3607) - // Standard Error: 62 - .saturating_add(Weight::from_parts(66_212, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(4_u64)) + // Measured: `145` + // Estimated: `6085` + // Minimum execution time: 266_006_000 picoseconds. + Weight::from_parts(287_700_788, 6085) + // Standard Error: 67 + .saturating_add(Weight::from_parts(63_196, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -2369,7 +2489,7 @@ impl WeightInfo for () { /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `System::EventTopics` (r:1 w:1) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) @@ -2378,8 +2498,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 43_553_000 picoseconds. - Weight::from_parts(45_036_000, 3780) + // Minimum execution time: 42_714_000 picoseconds. + Weight::from_parts(44_713_000, 3780) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -2395,8 +2515,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `552` // Estimated: `8967` - // Minimum execution time: 33_223_000 picoseconds. - Weight::from_parts(34_385_000, 8967) + // Minimum execution time: 33_516_000 picoseconds. + Weight::from_parts(34_823_000, 8967) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -2404,6 +2524,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2417,13 +2539,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_caller(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `866 + r * (6 ±0)` - // Estimated: `6806 + r * (6 ±0)` - // Minimum execution time: 254_213_000 picoseconds. - Weight::from_parts(273_464_980, 6806) - // Standard Error: 1_362 - .saturating_add(Weight::from_parts(322_619, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `869 + r * (6 ±0)` + // Estimated: `9284 + r * (6 ±0)` + // Minimum execution time: 246_563_000 picoseconds. 
+ Weight::from_parts(274_999_814, 9284) + // Standard Error: 628 + .saturating_add(Weight::from_parts(347_395, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2431,6 +2553,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1601 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2444,13 +2568,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_is_contract(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `922 + r * (209 ±0)` - // Estimated: `6826 + r * (2684 ±0)` - // Minimum execution time: 250_273_000 picoseconds. - Weight::from_parts(122_072_782, 6826) - // Standard Error: 5_629 - .saturating_add(Weight::from_parts(3_490_256, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `925 + r * (209 ±0)` + // Estimated: `9304 + r * (2684 ±0)` + // Minimum execution time: 252_236_000 picoseconds. + Weight::from_parts(121_390_081, 9304) + // Standard Error: 6_206 + .saturating_add(Weight::from_parts(3_558_718, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 2684).saturating_mul(r.into())) @@ -2459,6 +2583,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1601 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2472,13 +2598,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `921 + r * (213 ±0)` - // Estimated: `6830 + r * (2688 ±0)` - // Minimum execution time: 255_187_000 picoseconds. - Weight::from_parts(118_082_505, 6830) - // Standard Error: 6_302 - .saturating_add(Weight::from_parts(4_246_968, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `924 + r * (213 ±0)` + // Estimated: `9308 + r * (2688 ±0)` + // Minimum execution time: 269_427_000 picoseconds. 
+ Weight::from_parts(134_879_389, 9308) + // Standard Error: 6_711 + .saturating_add(Weight::from_parts(4_408_658, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 2688).saturating_mul(r.into())) @@ -2487,6 +2613,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2500,13 +2628,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_own_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `873 + r * (6 ±0)` - // Estimated: `6815 + r * (6 ±0)` - // Minimum execution time: 256_833_000 picoseconds. - Weight::from_parts(273_330_216, 6815) - // Standard Error: 881 - .saturating_add(Weight::from_parts(400_105, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `876 + r * (6 ±0)` + // Estimated: `9293 + r * (6 ±0)` + // Minimum execution time: 249_382_000 picoseconds. + Weight::from_parts(266_305_110, 9293) + // Standard Error: 1_592 + .saturating_add(Weight::from_parts(460_001, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2514,6 +2642,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2527,18 +2657,20 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_origin(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863 + r * (3 ±0)` - // Estimated: `6804 + r * (3 ±0)` - // Minimum execution time: 244_193_000 picoseconds. - Weight::from_parts(271_221_908, 6804) - // Standard Error: 442 - .saturating_add(Weight::from_parts(176_480, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `866 + r * (3 ±0)` + // Estimated: `9282 + r * (3 ±0)` + // Minimum execution time: 252_145_000 picoseconds. 
+ Weight::from_parts(277_211_668, 9282) + // Standard Error: 371 + .saturating_add(Weight::from_parts(174_394, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2552,13 +2684,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_root(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `753 + r * (3 ±0)` - // Estimated: `6693 + r * (3 ±0)` - // Minimum execution time: 232_603_000 picoseconds. - Weight::from_parts(260_577_368, 6693) - // Standard Error: 365 - .saturating_add(Weight::from_parts(158_126, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(7_u64)) + // Measured: `756 + r * (3 ±0)` + // Estimated: `9171 + r * (3 ±0)` + // Minimum execution time: 251_595_000 picoseconds. + Weight::from_parts(268_102_575, 9171) + // Standard Error: 334 + .saturating_add(Weight::from_parts(154_836, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(10_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } @@ -2566,6 +2698,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2579,13 +2713,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_address(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `867 + r * (6 ±0)` - // Estimated: `6807 + r * (6 ±0)` - // Minimum execution time: 247_564_000 picoseconds. - Weight::from_parts(275_108_914, 6807) - // Standard Error: 505 - .saturating_add(Weight::from_parts(315_065, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `870 + r * (6 ±0)` + // Estimated: `9285 + r * (6 ±0)` + // Minimum execution time: 251_404_000 picoseconds. 
+ Weight::from_parts(278_747_574, 9285) + // Standard Error: 782 + .saturating_add(Weight::from_parts(341_598, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2593,6 +2727,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2606,13 +2742,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_gas_left(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863 + r * (6 ±0)` - // Estimated: `6806 + r * (6 ±0)` - // Minimum execution time: 258_799_000 picoseconds. - Weight::from_parts(274_338_256, 6806) - // Standard Error: 632 - .saturating_add(Weight::from_parts(355_032, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `866 + r * (6 ±0)` + // Estimated: `9284 + r * (6 ±0)` + // Minimum execution time: 255_649_000 picoseconds. + Weight::from_parts(276_509_641, 9284) + // Standard Error: 1_262 + .saturating_add(Weight::from_parts(380_225, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2620,6 +2756,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:2 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2633,13 +2771,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1007 + r * (6 ±0)` - // Estimated: `6931 + r * (6 ±0)` - // Minimum execution time: 253_335_000 picoseconds. - Weight::from_parts(273_013_859, 6931) - // Standard Error: 2_007 - .saturating_add(Weight::from_parts(1_540_735, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `1010 + r * (6 ±0)` + // Estimated: `9409 + r * (6 ±0)` + // Minimum execution time: 267_143_000 picoseconds. 
+ Weight::from_parts(298_166_116, 9409) + // Standard Error: 3_765 + .saturating_add(Weight::from_parts(1_620_886, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2647,6 +2785,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2660,13 +2800,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_value_transferred(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `877 + r * (6 ±0)` - // Estimated: `6823 + r * (6 ±0)` - // Minimum execution time: 252_325_000 picoseconds. - Weight::from_parts(274_733_944, 6823) - // Standard Error: 603 - .saturating_add(Weight::from_parts(314_467, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `880 + r * (6 ±0)` + // Estimated: `9301 + r * (6 ±0)` + // Minimum execution time: 250_013_000 picoseconds. + Weight::from_parts(281_469_583, 9301) + // Standard Error: 730 + .saturating_add(Weight::from_parts(339_260, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2674,6 +2814,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2687,13 +2829,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_minimum_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `875 + r * (6 ±0)` - // Estimated: `6816 + r * (6 ±0)` - // Minimum execution time: 250_698_000 picoseconds. - Weight::from_parts(271_707_578, 6816) - // Standard Error: 952 - .saturating_add(Weight::from_parts(318_412, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `878 + r * (6 ±0)` + // Estimated: `9294 + r * (6 ±0)` + // Minimum execution time: 256_219_000 picoseconds. 
+ Weight::from_parts(275_309_266, 9294) + // Standard Error: 1_199 + .saturating_add(Weight::from_parts(350_824, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2701,6 +2843,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2714,13 +2858,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_block_number(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872 + r * (6 ±0)` - // Estimated: `6819 + r * (6 ±0)` - // Minimum execution time: 251_854_000 picoseconds. - Weight::from_parts(272_002_212, 6819) - // Standard Error: 622 - .saturating_add(Weight::from_parts(313_353, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `875 + r * (6 ±0)` + // Estimated: `9297 + r * (6 ±0)` + // Minimum execution time: 264_644_000 picoseconds. + Weight::from_parts(283_856_744, 9297) + // Standard Error: 623 + .saturating_add(Weight::from_parts(334_175, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2728,6 +2872,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2741,13 +2887,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_now(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863 + r * (6 ±0)` - // Estimated: `6804 + r * (6 ±0)` - // Minimum execution time: 252_010_000 picoseconds. - Weight::from_parts(270_387_000, 6804) - // Standard Error: 659 - .saturating_add(Weight::from_parts(325_856, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `866 + r * (6 ±0)` + // Estimated: `9282 + r * (6 ±0)` + // Minimum execution time: 263_364_000 picoseconds. 
+ Weight::from_parts(281_379_508, 9282) + // Standard Error: 639 + .saturating_add(Weight::from_parts(338_021, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2755,6 +2901,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2770,13 +2918,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_weight_to_fee(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `937 + r * (14 ±0)` - // Estimated: `6872 + r * (14 ±0)` - // Minimum execution time: 247_933_000 picoseconds. - Weight::from_parts(281_550_162, 6872) - // Standard Error: 660 - .saturating_add(Weight::from_parts(1_090_869, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `940 + r * (14 ±0)` + // Estimated: `9350 + r * (14 ±0)` + // Minimum execution time: 269_667_000 picoseconds. + Weight::from_parts(284_662_802, 9350) + // Standard Error: 691 + .saturating_add(Weight::from_parts(799_249, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 14).saturating_mul(r.into())) } @@ -2784,6 +2932,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2797,13 +2947,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_input(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `865 + r * (6 ±0)` - // Estimated: `6807 + r * (6 ±0)` - // Minimum execution time: 251_158_000 picoseconds. - Weight::from_parts(274_623_152, 6807) - // Standard Error: 491 - .saturating_add(Weight::from_parts(263_916, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `868 + r * (6 ±0)` + // Estimated: `9285 + r * (6 ±0)` + // Minimum execution time: 267_018_000 picoseconds. 
+ Weight::from_parts(281_842_630, 9285) + // Standard Error: 453 + .saturating_add(Weight::from_parts(255_373, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2811,6 +2961,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2824,19 +2976,21 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1048576]`. fn seal_input_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `869` - // Estimated: `6809` - // Minimum execution time: 263_205_000 picoseconds. - Weight::from_parts(216_792_893, 6809) + // Measured: `872` + // Estimated: `9287` + // Minimum execution time: 258_208_000 picoseconds. + Weight::from_parts(227_686_792, 9287) // Standard Error: 23 .saturating_add(Weight::from_parts(989, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2850,11 +3004,11 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1]`. fn seal_return(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `853 + r * (45 ±0)` - // Estimated: `6793 + r * (45 ±0)` - // Minimum execution time: 239_663_000 picoseconds. - Weight::from_parts(266_124_565, 6793) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `856 + r * (45 ±0)` + // Estimated: `9271 + r * (45 ±0)` + // Minimum execution time: 245_714_000 picoseconds. 
+ Weight::from_parts(272_527_059, 9271) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 45).saturating_mul(r.into())) } @@ -2862,6 +3016,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2875,19 +3031,21 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1048576]`. fn seal_return_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863` - // Estimated: `6810` - // Minimum execution time: 241_763_000 picoseconds. - Weight::from_parts(266_535_552, 6810) + // Measured: `866` + // Estimated: `9288` + // Minimum execution time: 256_060_000 picoseconds. + Weight::from_parts(276_710_811, 9288) // Standard Error: 0 - .saturating_add(Weight::from_parts(320, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(312, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:3 w:3) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:1 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:2 w:2) @@ -2901,28 +3059,30 @@ impl WeightInfo for () { /// Storage: `System::EventTopics` (r:4 w:4) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `Contracts::DeletionQueue` (r:0 w:1) /// Proof: `Contracts::DeletionQueue` (`max_values`: None, `max_size`: Some(142), added: 2617, mode: `Measured`) /// The range of component `r` is `[0, 1]`. fn seal_terminate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2972 + r * (316 ±0)` - // Estimated: `8912 + r * (5266 ±0)` - // Minimum execution time: 265_888_000 picoseconds. 
- Weight::from_parts(291_232_232, 8912) - // Standard Error: 845_475 - .saturating_add(Weight::from_parts(104_398_867, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) - .saturating_add(RocksDbWeight::get().reads((7_u64).saturating_mul(r.into()))) + // Measured: `2902 + r * (529 ±0)` + // Estimated: `11317 + r * (5479 ±0)` + // Minimum execution time: 270_479_000 picoseconds. + Weight::from_parts(296_706_989, 11317) + // Standard Error: 813_407 + .saturating_add(Weight::from_parts(109_236_910, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) + .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((10_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 5266).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 5479).saturating_mul(r.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2938,13 +3098,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_random(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `944 + r * (10 ±0)` - // Estimated: `6885 + r * (10 ±0)` - // Minimum execution time: 248_500_000 picoseconds. - Weight::from_parts(282_353_053, 6885) - // Standard Error: 1_144 - .saturating_add(Weight::from_parts(1_193_841, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `947 + r * (10 ±0)` + // Estimated: `9363 + r * (10 ±0)` + // Minimum execution time: 251_787_000 picoseconds. + Weight::from_parts(284_036_313, 9363) + // Standard Error: 2_560 + .saturating_add(Weight::from_parts(1_238_301, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) } @@ -2952,6 +3112,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2965,13 +3127,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. 
fn seal_deposit_event(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863 + r * (10 ±0)` - // Estimated: `6805 + r * (10 ±0)` - // Minimum execution time: 248_130_000 picoseconds. - Weight::from_parts(279_583_178, 6805) - // Standard Error: 971 - .saturating_add(Weight::from_parts(1_987_941, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `866 + r * (10 ±0)` + // Estimated: `9283 + r * (10 ±0)` + // Minimum execution time: 250_566_000 picoseconds. + Weight::from_parts(275_402_784, 9283) + // Standard Error: 3_939 + .saturating_add(Weight::from_parts(1_941_995, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) } @@ -2979,6 +3141,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2993,15 +3157,15 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 16384]`. fn seal_deposit_event_per_topic_and_byte(t: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `880 + t * (32 ±0)` - // Estimated: `6825 + t * (2508 ±0)` - // Minimum execution time: 258_594_000 picoseconds. - Weight::from_parts(276_734_422, 6825) - // Standard Error: 102_093 - .saturating_add(Weight::from_parts(2_559_383, 0).saturating_mul(t.into())) + // Measured: `883 + t * (32 ±0)` + // Estimated: `9303 + t * (2508 ±0)` + // Minimum execution time: 267_544_000 picoseconds. + Weight::from_parts(280_117_825, 9303) + // Standard Error: 102_111 + .saturating_add(Weight::from_parts(3_253_038, 0).saturating_mul(t.into())) // Standard Error: 28 - .saturating_add(Weight::from_parts(501, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(668, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into()))) @@ -3011,6 +3175,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3024,13 +3190,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. 
fn seal_debug_message(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `862 + r * (7 ±0)` - // Estimated: `6807 + r * (7 ±0)` - // Minimum execution time: 154_564_000 picoseconds. - Weight::from_parts(168_931_365, 6807) - // Standard Error: 349 - .saturating_add(Weight::from_parts(226_848, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `865 + r * (7 ±0)` + // Estimated: `9285 + r * (7 ±0)` + // Minimum execution time: 157_769_000 picoseconds. + Weight::from_parts(173_026_565, 9285) + // Standard Error: 405 + .saturating_add(Weight::from_parts(219_727, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 7).saturating_mul(r.into())) } @@ -3038,6 +3204,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `MaxEncodedLen`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3051,13 +3219,13 @@ impl WeightInfo for () { /// The range of component `i` is `[0, 1048576]`. fn seal_debug_message_per_byte(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `125813` - // Estimated: `131755` - // Minimum execution time: 394_382_000 picoseconds. - Weight::from_parts(376_780_500, 131755) + // Measured: `125816` + // Estimated: `131758` + // Minimum execution time: 403_961_000 picoseconds. + Weight::from_parts(376_480_872, 131758) // Standard Error: 12 - .saturating_add(Weight::from_parts(1_026, 0).saturating_mul(i.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(1_041, 0).saturating_mul(i.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -3065,13 +3233,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 800]`. fn seal_set_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `924 + r * (292 ±0)` - // Estimated: `926 + r * (293 ±0)` - // Minimum execution time: 249_757_000 picoseconds. - Weight::from_parts(177_324_374, 926) - // Standard Error: 9_512 - .saturating_add(Weight::from_parts(6_176_717, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `927 + r * (292 ±0)` + // Estimated: `929 + r * (293 ±0)` + // Minimum execution time: 256_272_000 picoseconds. + Weight::from_parts(181_058_379, 929) + // Standard Error: 9_838 + .saturating_add(Weight::from_parts(6_316_677, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -3082,13 +3250,13 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 16384]`. 
fn seal_set_storage_per_new_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1447` - // Estimated: `1430` - // Minimum execution time: 267_564_000 picoseconds. - Weight::from_parts(328_701_080, 1430) - // Standard Error: 61 - .saturating_add(Weight::from_parts(576, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(12_u64)) + // Measured: `1450` + // Estimated: `1433` + // Minimum execution time: 268_713_000 picoseconds. + Weight::from_parts(328_329_131, 1433) + // Standard Error: 66 + .saturating_add(Weight::from_parts(962, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(15_u64)) .saturating_add(RocksDbWeight::get().writes(8_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -3096,13 +3264,13 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 16384]`. fn seal_set_storage_per_old_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1253 + n * (1 ±0)` - // Estimated: `1253 + n * (1 ±0)` - // Minimum execution time: 266_347_000 picoseconds. - Weight::from_parts(289_824_718, 1253) - // Standard Error: 34 - .saturating_add(Weight::from_parts(184, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `1256 + n * (1 ±0)` + // Estimated: `1256 + n * (1 ±0)` + // Minimum execution time: 265_683_000 picoseconds. + Weight::from_parts(293_784_477, 1256) + // Standard Error: 31 + .saturating_add(Weight::from_parts(377, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -3111,13 +3279,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 800]`. fn seal_clear_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `921 + r * (288 ±0)` - // Estimated: `927 + r * (289 ±0)` - // Minimum execution time: 247_207_000 picoseconds. - Weight::from_parts(179_856_075, 927) - // Standard Error: 9_383 - .saturating_add(Weight::from_parts(6_053_198, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `924 + r * (288 ±0)` + // Estimated: `930 + r * (289 ±0)` + // Minimum execution time: 269_959_000 picoseconds. + Weight::from_parts(186_065_911, 930) + // Standard Error: 9_679 + .saturating_add(Weight::from_parts(6_286_855, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -3128,13 +3296,11 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 16384]`. fn seal_clear_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1249 + n * (1 ±0)` - // Estimated: `1249 + n * (1 ±0)` - // Minimum execution time: 262_655_000 picoseconds. - Weight::from_parts(289_482_543, 1249) - // Standard Error: 35 - .saturating_add(Weight::from_parts(92, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `1252 + n * (1 ±0)` + // Estimated: `1252 + n * (1 ±0)` + // Minimum execution time: 265_805_000 picoseconds. 
+ Weight::from_parts(294_633_695, 1252) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -3143,13 +3309,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 800]`. fn seal_get_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `921 + r * (296 ±0)` - // Estimated: `923 + r * (297 ±0)` - // Minimum execution time: 247_414_000 picoseconds. - Weight::from_parts(203_317_182, 923) - // Standard Error: 7_191 - .saturating_add(Weight::from_parts(4_925_154, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `924 + r * (296 ±0)` + // Estimated: `926 + r * (297 ±0)` + // Minimum execution time: 264_592_000 picoseconds. + Weight::from_parts(204_670_461, 926) + // Standard Error: 6_869 + .saturating_add(Weight::from_parts(5_137_920, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 297).saturating_mul(r.into())) @@ -3159,13 +3325,13 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 16384]`. fn seal_get_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1265 + n * (1 ±0)` - // Estimated: `1265 + n * (1 ±0)` - // Minimum execution time: 258_910_000 picoseconds. - Weight::from_parts(283_086_514, 1265) - // Standard Error: 39 - .saturating_add(Weight::from_parts(980, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `1268 + n * (1 ±0)` + // Estimated: `1268 + n * (1 ±0)` + // Minimum execution time: 273_165_000 picoseconds. + Weight::from_parts(297_883_669, 1268) + // Standard Error: 37 + .saturating_add(Weight::from_parts(576, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -3174,13 +3340,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 800]`. fn seal_contains_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `932 + r * (288 ±0)` - // Estimated: `929 + r * (289 ±0)` - // Minimum execution time: 252_410_000 picoseconds. - Weight::from_parts(201_227_879, 929) - // Standard Error: 6_899 - .saturating_add(Weight::from_parts(4_774_983, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `935 + r * (288 ±0)` + // Estimated: `932 + r * (289 ±0)` + // Minimum execution time: 252_257_000 picoseconds. + Weight::from_parts(205_018_595, 932) + // Standard Error: 7_605 + .saturating_add(Weight::from_parts(4_933_378, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 289).saturating_mul(r.into())) @@ -3190,13 +3356,13 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 16384]`. fn seal_contains_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1252 + n * (1 ±0)` - // Estimated: `1252 + n * (1 ±0)` - // Minimum execution time: 259_053_000 picoseconds. 
- Weight::from_parts(283_392_084, 1252) - // Standard Error: 41 - .saturating_add(Weight::from_parts(213, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `1255 + n * (1 ±0)` + // Estimated: `1255 + n * (1 ±0)` + // Minimum execution time: 266_839_000 picoseconds. + Weight::from_parts(292_064_487, 1255) + // Standard Error: 34 + .saturating_add(Weight::from_parts(194, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -3205,13 +3371,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 800]`. fn seal_take_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `914 + r * (296 ±0)` - // Estimated: `919 + r * (297 ±0)` - // Minimum execution time: 251_371_000 picoseconds. - Weight::from_parts(177_119_717, 919) - // Standard Error: 9_421 - .saturating_add(Weight::from_parts(6_226_005, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `917 + r * (296 ±0)` + // Estimated: `922 + r * (297 ±0)` + // Minimum execution time: 266_072_000 picoseconds. + Weight::from_parts(189_018_352, 922) + // Standard Error: 9_530 + .saturating_add(Weight::from_parts(6_481_011, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -3222,13 +3388,13 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 16384]`. fn seal_take_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1266 + n * (1 ±0)` - // Estimated: `1266 + n * (1 ±0)` - // Minimum execution time: 263_350_000 picoseconds. - Weight::from_parts(284_323_917, 1266) - // Standard Error: 31 - .saturating_add(Weight::from_parts(921, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `1269 + n * (1 ±0)` + // Estimated: `1269 + n * (1 ±0)` + // Minimum execution time: 270_703_000 picoseconds. + Weight::from_parts(292_867_105, 1269) + // Standard Error: 30 + .saturating_add(Weight::from_parts(800, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -3236,6 +3402,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1602 w:1601) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3249,13 +3417,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. 
fn seal_transfer(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1415 + r * (45 ±0)` - // Estimated: `7307 + r * (2520 ±0)` - // Minimum execution time: 248_701_000 picoseconds. - Weight::from_parts(17_811_969, 7307) - // Standard Error: 35_154 - .saturating_add(Weight::from_parts(31_809_738, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `1418 + r * (45 ±0)` + // Estimated: `9785 + r * (2520 ±0)` + // Minimum execution time: 251_843_000 picoseconds. + Weight::from_parts(181_026_888, 9785) + // Standard Error: 38_221 + .saturating_add(Weight::from_parts(30_626_681, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -3265,6 +3433,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:801 w:801) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:2 w:0) @@ -3278,13 +3448,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 800]`. fn seal_call(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1260 + r * (245 ±0)` - // Estimated: `9440 + r * (2721 ±0)` - // Minimum execution time: 247_335_000 picoseconds. - Weight::from_parts(264_025_000, 9440) - // Standard Error: 121_299 - .saturating_add(Weight::from_parts(234_770_827, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Measured: `1263 + r * (245 ±0)` + // Estimated: `9635 + r * (2721 ±0)` + // Minimum execution time: 254_881_000 picoseconds. 
+ Weight::from_parts(272_871_000, 9635) + // Standard Error: 94_527 + .saturating_add(Weight::from_parts(233_379_497, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(r.into()))) @@ -3294,6 +3464,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:736 w:0) @@ -3308,12 +3480,12 @@ impl WeightInfo for () { fn seal_delegate_call(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + r * (576 ±0)` - // Estimated: `6812 + r * (2637 ±3)` - // Minimum execution time: 261_011_000 picoseconds. - Weight::from_parts(264_554_000, 6812) - // Standard Error: 104_415 - .saturating_add(Weight::from_parts(231_627_084, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Estimated: `9290 + r * (2637 ±3)` + // Minimum execution time: 266_704_000 picoseconds. + Weight::from_parts(273_165_000, 9290) + // Standard Error: 141_486 + .saturating_add(Weight::from_parts(232_947_049, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -3323,6 +3495,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:3 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:2 w:2) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:2 w:0) @@ -3337,15 +3511,15 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 1048576]`. fn seal_call_per_transfer_clone_byte(t: u32, c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1307 + t * (277 ±0)` - // Estimated: `12197 + t * (5227 ±0)` - // Minimum execution time: 445_561_000 picoseconds. - Weight::from_parts(62_287_490, 12197) - // Standard Error: 11_797_697 - .saturating_add(Weight::from_parts(357_530_529, 0).saturating_mul(t.into())) + // Measured: `1310 + t * (277 ±0)` + // Estimated: `12200 + t * (5227 ±0)` + // Minimum execution time: 446_451_000 picoseconds. 
+ Weight::from_parts(68_175_222, 12200) + // Standard Error: 11_619_250 + .saturating_add(Weight::from_parts(354_237_260, 0).saturating_mul(t.into())) // Standard Error: 17 - .saturating_add(Weight::from_parts(970, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(13_u64)) + .saturating_add(Weight::from_parts(987, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(16_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(t.into()))) @@ -3355,6 +3529,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:802 w:802) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:801 w:801) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:801 w:800) @@ -3368,17 +3544,17 @@ impl WeightInfo for () { /// Storage: `System::EventTopics` (r:803 w:803) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:800 w:800) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `r` is `[1, 800]`. fn seal_instantiate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1278 + r * (255 ±0)` - // Estimated: `9620 + r * (2731 ±0)` - // Minimum execution time: 621_897_000 picoseconds. - Weight::from_parts(631_687_000, 9620) - // Standard Error: 215_241 - .saturating_add(Weight::from_parts(350_527_831, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Measured: `1281 + r * (255 ±0)` + // Estimated: `9623 + r * (2731 ±0)` + // Minimum execution time: 628_156_000 picoseconds. 
+ Weight::from_parts(642_052_000, 9623) + // Standard Error: 223_957 + .saturating_add(Weight::from_parts(348_660_264, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().reads((6_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(7_u64)) .saturating_add(RocksDbWeight::get().writes((5_u64).saturating_mul(r.into()))) @@ -3388,6 +3564,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:3 w:3) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:2 w:2) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:2 w:1) @@ -3401,23 +3579,23 @@ impl WeightInfo for () { /// Storage: `System::EventTopics` (r:4 w:4) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `t` is `[0, 1]`. /// The range of component `i` is `[0, 983040]`. /// The range of component `s` is `[0, 983040]`. fn seal_instantiate_per_transfer_input_salt_byte(t: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1303 + t * (104 ±0)` - // Estimated: `12211 + t * (2549 ±1)` - // Minimum execution time: 2_181_184_000 picoseconds. - Weight::from_parts(1_194_190_111, 12211) - // Standard Error: 11_578_766 - .saturating_add(Weight::from_parts(6_361_884, 0).saturating_mul(t.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_025, 0).saturating_mul(i.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_158, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(16_u64)) + // Measured: `1306 + t * (104 ±0)` + // Estimated: `12214 + t * (2549 ±1)` + // Minimum execution time: 2_106_379_000 picoseconds. 
+ Weight::from_parts(1_098_227_098, 12214) + // Standard Error: 12_549_729 + .saturating_add(Weight::from_parts(21_628_382, 0).saturating_mul(t.into())) + // Standard Error: 19 + .saturating_add(Weight::from_parts(1_059, 0).saturating_mul(i.into())) + // Standard Error: 19 + .saturating_add(Weight::from_parts(1_220, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(19_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(11_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into()))) @@ -3427,6 +3605,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3440,13 +3620,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_hash_sha2_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `862 + r * (8 ±0)` - // Estimated: `6801 + r * (8 ±0)` - // Minimum execution time: 241_609_000 picoseconds. - Weight::from_parts(268_716_874, 6801) - // Standard Error: 617 - .saturating_add(Weight::from_parts(377_753, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `865 + r * (8 ±0)` + // Estimated: `9279 + r * (8 ±0)` + // Minimum execution time: 257_102_000 picoseconds. + Weight::from_parts(278_920_692, 9279) + // Standard Error: 464 + .saturating_add(Weight::from_parts(374_120, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } @@ -3454,6 +3634,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3467,19 +3649,21 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1048576]`. fn seal_hash_sha2_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `870` - // Estimated: `6808` - // Minimum execution time: 261_296_000 picoseconds. - Weight::from_parts(255_531_654, 6808) + // Measured: `873` + // Estimated: `9286` + // Minimum execution time: 266_609_000 picoseconds. 
+ Weight::from_parts(263_034_132, 9286) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_081, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(1_082, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3493,13 +3677,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `864 + r * (8 ±0)` - // Estimated: `6806 + r * (8 ±0)` - // Minimum execution time: 243_583_000 picoseconds. - Weight::from_parts(270_025_058, 6806) - // Standard Error: 560 - .saturating_add(Weight::from_parts(767_519, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `867 + r * (8 ±0)` + // Estimated: `9284 + r * (8 ±0)` + // Minimum execution time: 246_275_000 picoseconds. + Weight::from_parts(279_091_594, 9284) + // Standard Error: 887 + .saturating_add(Weight::from_parts(810_394, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } @@ -3507,6 +3691,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3520,19 +3706,21 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1048576]`. fn seal_hash_keccak_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872` - // Estimated: `6814` - // Minimum execution time: 253_798_000 picoseconds. - Weight::from_parts(265_542_351, 6814) + // Measured: `875` + // Estimated: `9292` + // Minimum execution time: 249_254_000 picoseconds. 
+ Weight::from_parts(270_833_823, 9292) // Standard Error: 0 - .saturating_add(Weight::from_parts(3_343, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(3_347, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3546,13 +3734,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `864 + r * (8 ±0)` - // Estimated: `6808 + r * (8 ±0)` - // Minimum execution time: 247_332_000 picoseconds. - Weight::from_parts(269_183_656, 6808) - // Standard Error: 665 - .saturating_add(Weight::from_parts(443_386, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `867 + r * (8 ±0)` + // Estimated: `9286 + r * (8 ±0)` + // Minimum execution time: 259_131_000 picoseconds. + Weight::from_parts(272_665_020, 9286) + // Standard Error: 862 + .saturating_add(Weight::from_parts(451_247, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } @@ -3560,6 +3748,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3573,19 +3763,21 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872` - // Estimated: `6813` - // Minimum execution time: 250_855_000 picoseconds. - Weight::from_parts(258_752_975, 6813) + // Measured: `875` + // Estimated: `9291` + // Minimum execution time: 276_465_000 picoseconds. 
+ Weight::from_parts(269_450_297, 9291) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_202, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(1_190, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3599,13 +3791,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_hash_blake2_128(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `864 + r * (8 ±0)` - // Estimated: `6805 + r * (8 ±0)` - // Minimum execution time: 240_733_000 picoseconds. - Weight::from_parts(269_134_358, 6805) - // Standard Error: 512 - .saturating_add(Weight::from_parts(440_043, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `867 + r * (8 ±0)` + // Estimated: `9283 + r * (8 ±0)` + // Minimum execution time: 248_349_000 picoseconds. + Weight::from_parts(273_660_686, 9283) + // Standard Error: 656 + .saturating_add(Weight::from_parts(444_293, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } @@ -3613,6 +3805,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3626,19 +3820,21 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_128_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872` - // Estimated: `6811` - // Minimum execution time: 247_377_000 picoseconds. - Weight::from_parts(261_077_322, 6811) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_195, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `875` + // Estimated: `9289` + // Minimum execution time: 252_107_000 picoseconds. 
+ Weight::from_parts(267_427_726, 9289) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_199, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3652,13 +3848,13 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 125697]`. fn seal_sr25519_verify_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `997 + n * (1 ±0)` - // Estimated: `6934 + n * (1 ±0)` - // Minimum execution time: 307_337_000 picoseconds. - Weight::from_parts(326_710_473, 6934) - // Standard Error: 9 - .saturating_add(Weight::from_parts(5_765, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `1000 + n * (1 ±0)` + // Estimated: `9412 + n * (1 ±0)` + // Minimum execution time: 311_252_000 picoseconds. + Weight::from_parts(333_250_773, 9412) + // Standard Error: 12 + .saturating_add(Weight::from_parts(5_668, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -3666,6 +3862,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3679,13 +3877,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 160]`. fn seal_sr25519_verify(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `805 + r * (112 ±0)` - // Estimated: `6748 + r * (112 ±0)` - // Minimum execution time: 245_432_000 picoseconds. - Weight::from_parts(294_206_377, 6748) - // Standard Error: 7_229 - .saturating_add(Weight::from_parts(41_480_485, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `810 + r * (112 ±0)` + // Estimated: `9226 + r * (112 ±0)` + // Minimum execution time: 267_431_000 picoseconds. 
+ Weight::from_parts(300_733_048, 9226) + // Standard Error: 8_806 + .saturating_add(Weight::from_parts(42_169_197, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(r.into())) } @@ -3693,6 +3891,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3706,13 +3906,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `907 + r * (76 ±0)` - // Estimated: `6802 + r * (77 ±0)` - // Minimum execution time: 247_788_000 picoseconds. - Weight::from_parts(303_940_062, 6802) - // Standard Error: 10_671 - .saturating_add(Weight::from_parts(45_730_772, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `910 + r * (76 ±0)` + // Estimated: `9279 + r * (77 ±0)` + // Minimum execution time: 265_905_000 picoseconds. + Weight::from_parts(305_381_951, 9279) + // Standard Error: 9_863 + .saturating_add(Weight::from_parts(45_771_182, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 77).saturating_mul(r.into())) } @@ -3720,6 +3920,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3733,13 +3935,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `877 + r * (42 ±0)` - // Estimated: `6816 + r * (42 ±0)` - // Minimum execution time: 248_825_000 picoseconds. - Weight::from_parts(286_832_225, 6816) - // Standard Error: 5_274 - .saturating_add(Weight::from_parts(11_889_262, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `880 + r * (42 ±0)` + // Estimated: `9294 + r * (42 ±0)` + // Minimum execution time: 266_701_000 picoseconds. 
+ Weight::from_parts(292_407_888, 9294) + // Standard Error: 5_394 + .saturating_add(Weight::from_parts(11_896_065, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 42).saturating_mul(r.into())) } @@ -3747,6 +3949,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1536 w:1536) @@ -3761,12 +3965,12 @@ impl WeightInfo for () { fn seal_set_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + r * (965 ±0)` - // Estimated: `6807 + r * (3090 ±7)` - // Minimum execution time: 244_982_000 picoseconds. - Weight::from_parts(265_297_000, 6807) - // Standard Error: 39_895 - .saturating_add(Weight::from_parts(22_435_888, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Estimated: `9285 + r * (3090 ±7)` + // Minimum execution time: 268_755_000 picoseconds. + Weight::from_parts(272_300_000, 9285) + // Standard Error: 43_747 + .saturating_add(Weight::from_parts(25_001_889, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(r.into()))) @@ -3776,6 +3980,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:33 w:32) @@ -3789,22 +3995,24 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 32]`. fn lock_delegate_dependency(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `928 + r * (131 ±0)` - // Estimated: `6878 + r * (2606 ±0)` - // Minimum execution time: 246_455_000 picoseconds. - Weight::from_parts(275_334_919, 6878) - // Standard Error: 20_911 - .saturating_add(Weight::from_parts(6_427_525, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `937 + r * (131 ±0)` + // Estimated: `9346 + r * (2607 ±0)` + // Minimum execution time: 255_190_000 picoseconds. 
+ Weight::from_parts(285_643_922, 9346) + // Standard Error: 23_582 + .saturating_add(Weight::from_parts(6_289_940, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2606).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 2607).saturating_mul(r.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `MaxEncodedLen`) /// Storage: `Contracts::CodeInfoOf` (r:33 w:32) @@ -3818,13 +4026,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 32]`. fn unlock_delegate_dependency(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `969 + r * (183 ±0)` + // Measured: `972 + r * (184 ±0)` // Estimated: `129453 + r * (2568 ±0)` - // Minimum execution time: 254_472_000 picoseconds. - Weight::from_parts(280_657_909, 129453) - // Standard Error: 20_131 - .saturating_add(Weight::from_parts(5_644_006, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Minimum execution time: 260_061_000 picoseconds. + Weight::from_parts(286_849_343, 129453) + // Standard Error: 22_406 + .saturating_add(Weight::from_parts(5_661_459, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -3834,6 +4042,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3847,13 +4057,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_reentrance_count(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `858 + r * (3 ±0)` - // Estimated: `6804 + r * (3 ±0)` - // Minimum execution time: 250_535_000 picoseconds. 
- Weight::from_parts(270_318_376, 6804) - // Standard Error: 386 - .saturating_add(Weight::from_parts(174_627, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `861 + r * (3 ±0)` + // Estimated: `9282 + r * (3 ±0)` + // Minimum execution time: 250_922_000 picoseconds. + Weight::from_parts(277_294_913, 9282) + // Standard Error: 391 + .saturating_add(Weight::from_parts(169_457, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } @@ -3861,6 +4071,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3874,13 +4086,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_account_reentrance_count(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2109 + r * (39 ±0)` - // Estimated: `7899 + r * (40 ±0)` - // Minimum execution time: 248_174_000 picoseconds. - Weight::from_parts(301_826_520, 7899) - // Standard Error: 801 - .saturating_add(Weight::from_parts(248_479, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `2112 + r * (39 ±0)` + // Estimated: `10377 + r * (40 ±0)` + // Minimum execution time: 267_293_000 picoseconds. + Weight::from_parts(309_125_635, 10377) + // Standard Error: 637 + .saturating_add(Weight::from_parts(245_508, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 40).saturating_mul(r.into())) } @@ -3888,6 +4100,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3903,13 +4117,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_instantiation_nonce(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `861 + r * (3 ±0)` - // Estimated: `6801 + r * (3 ±0)` - // Minimum execution time: 246_540_000 picoseconds. 
- Weight::from_parts(268_913_509, 6801) - // Standard Error: 378 - .saturating_add(Weight::from_parts(154_950, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `864 + r * (3 ±0)` + // Estimated: `9279 + r * (3 ±0)` + // Minimum execution time: 256_057_000 picoseconds. + Weight::from_parts(278_068_870, 9279) + // Standard Error: 335 + .saturating_add(Weight::from_parts(152_083, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } @@ -3918,9 +4132,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_777_000 picoseconds. - Weight::from_parts(1_707_601, 0) - // Standard Error: 14 - .saturating_add(Weight::from_parts(15_392, 0).saturating_mul(r.into())) + // Minimum execution time: 1_846_000 picoseconds. + Weight::from_parts(1_808_029, 0) + // Standard Error: 9 + .saturating_add(Weight::from_parts(14_937, 0).saturating_mul(r.into())) } } diff --git a/substrate/frame/contracts/uapi/src/host.rs b/substrate/frame/contracts/uapi/src/host.rs index c25be4479cefaac81e8cb25c6b56502f9e2fd83a..04f58895ab4f6ccebb6452ebeec20038ce7f0d78 100644 --- a/substrate/frame/contracts/uapi/src/host.rs +++ b/substrate/frame/contracts/uapi/src/host.rs @@ -180,7 +180,7 @@ pub trait HostFn { flags: CallFlags, callee: &[u8], ref_time_limit: u64, - proof_time_limit: u64, + proof_size_limit: u64, deposit: Option<&[u8]>, value: &[u8], input_data: &[u8], diff --git a/substrate/frame/contracts/uapi/src/host/riscv32.rs b/substrate/frame/contracts/uapi/src/host/riscv32.rs index dbd5abc42409741af2679d99274d1cdf48f2c757..561ab28747df9e55d105d6db09f4776d5e453e28 100644 --- a/substrate/frame/contracts/uapi/src/host/riscv32.rs +++ b/substrate/frame/contracts/uapi/src/host/riscv32.rs @@ -130,7 +130,7 @@ impl HostFn for HostFnImpl { flags: CallFlags, callee: &[u8], ref_time_limit: u64, - proof_time_limit: u64, + proof_size_limit: u64, deposit: Option<&[u8]>, value: &[u8], input_data: &[u8], diff --git a/substrate/frame/contracts/uapi/src/host/wasm32.rs b/substrate/frame/contracts/uapi/src/host/wasm32.rs index 9651aa73d6f9bd3364432ed9135e460f62aa0b6c..bc697238061ab16e47d304bd9583f51f7eb99c0b 100644 --- a/substrate/frame/contracts/uapi/src/host/wasm32.rs +++ b/substrate/frame/contracts/uapi/src/host/wasm32.rs @@ -223,7 +223,7 @@ mod sys { pub fn weight_to_fee( ref_time_limit: u64, - proof_time_limit: u64, + proof_size_limit: u64, output_ptr: *mut u8, output_len_ptr: *mut u32, ); @@ -239,7 +239,7 @@ mod sys { flags: u32, callee_ptr: *const u8, ref_time_limit: u64, - proof_time_limit: u64, + proof_size_limit: u64, deposit_ptr: *const u8, transferred_value_ptr: *const u8, input_data_ptr: *const u8, @@ -251,7 +251,7 @@ mod sys { pub fn instantiate( code_hash_ptr: *const u8, ref_time_limit: u64, - proof_time_limit: u64, + proof_size_limit: u64, deposit_ptr: *const u8, value_ptr: *const u8, input_ptr: *const u8, @@ -301,6 +301,7 @@ macro_rules! 
impl_wrapper_for { unsafe { $( $mod )::*::$name(output.as_mut_ptr(), &mut output_len); } + extract_from_slice(output, output_len as usize) } } }; @@ -487,7 +488,7 @@ impl HostFn for HostFnImpl { flags: CallFlags, callee: &[u8], ref_time_limit: u64, - proof_time_limit: u64, + proof_size_limit: u64, deposit: Option<&[u8]>, value: &[u8], input_data: &[u8], @@ -501,7 +502,7 @@ impl HostFn for HostFnImpl { flags.bits(), callee.as_ptr(), ref_time_limit, - proof_time_limit, + proof_size_limit, deposit_ptr, value.as_ptr(), input_data.as_ptr(), diff --git a/substrate/frame/conviction-voting/src/weights.rs b/substrate/frame/conviction-voting/src/weights.rs index 225f5c2cadd6fc3d4cb1dc735cf164d92150327f..75d9e8499ed8e0b3f71d1a15e23b9c26638e44f1 100644 --- a/substrate/frame/conviction-voting/src/weights.rs +++ b/substrate/frame/conviction-voting/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_conviction_voting +//! Autogenerated weights for `pallet_conviction_voting` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/conviction-voting/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/conviction-voting/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_conviction_voting. +/// Weight functions needed for `pallet_conviction_voting`. pub trait WeightInfo { fn vote_new() -> Weight; fn vote_existing() -> Weight; @@ -61,280 +60,300 @@ pub trait WeightInfo { fn unlock() -> Weight; } -/// Weights for pallet_conviction_voting using the Substrate node and recommended hardware. +/// Weights for `pallet_conviction_voting` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(59), added: 2534, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(59), added: 2534, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn vote_new() -> Weight { // Proof Size summary in bytes: - // Measured: `13074` + // Measured: `13141` // Estimated: `219984` - // Minimum execution time: 112_936_000 picoseconds. - Weight::from_parts(116_972_000, 219984) + // Minimum execution time: 102_539_000 picoseconds. 
+ Weight::from_parts(105_873_000, 219984) .saturating_add(T::DbWeight::get().reads(7_u64)) - .saturating_add(T::DbWeight::get().writes(6_u64)) + .saturating_add(T::DbWeight::get().writes(7_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(59), added: 2534, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(59), added: 2534, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn vote_existing() -> Weight { // Proof Size summary in bytes: - // Measured: `20216` + // Measured: `20283` // Estimated: `219984` - // Minimum execution time: 291_971_000 picoseconds. - Weight::from_parts(301_738_000, 219984) + // Minimum execution time: 275_424_000 picoseconds. 
+ Weight::from_parts(283_690_000, 219984) .saturating_add(T::DbWeight::get().reads(7_u64)) - .saturating_add(T::DbWeight::get().writes(6_u64)) + .saturating_add(T::DbWeight::get().writes(7_u64)) } - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn remove_vote() -> Weight { // Proof Size summary in bytes: - // Measured: `19968` + // Measured: `20035` // Estimated: `219984` - // Minimum execution time: 262_582_000 picoseconds. - Weight::from_parts(270_955_000, 219984) + // Minimum execution time: 275_109_000 picoseconds. + Weight::from_parts(281_315_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:0) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) fn remove_other_vote() -> Weight { // Proof Size summary in bytes: - // Measured: `12675` + // Measured: `12742` // Estimated: `30706` - // Minimum execution time: 52_909_000 picoseconds. - Weight::from_parts(56_365_000, 30706) + // Minimum execution time: 49_629_000 picoseconds. 
+ Weight::from_parts(51_300_000, 30706) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: ConvictionVoting VotingFor (r:2 w:2) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(59), added: 2534, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:2 w:2) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(59), added: 2534, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 1]`. fn delegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `240 + r * (1627 ±0)` + // Measured: `306 + r * (1628 ±0)` // Estimated: `109992 + r * (109992 ±0)` - // Minimum execution time: 54_640_000 picoseconds. - Weight::from_parts(57_185_281, 109992) - // Standard Error: 193_362 - .saturating_add(Weight::from_parts(44_897_418, 0).saturating_mul(r.into())) + // Minimum execution time: 45_776_000 picoseconds. 
+ Weight::from_parts(47_917_822, 109992) + // Standard Error: 124_174 + .saturating_add(Weight::from_parts(43_171_077, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) - .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 109992).saturating_mul(r.into())) } - /// Storage: ConvictionVoting VotingFor (r:2 w:2) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:2 w:2) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 1]`. fn undelegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `406 + r * (1376 ±0)` + // Measured: `472 + r * (1377 ±0)` // Estimated: `109992 + r * (109992 ±0)` - // Minimum execution time: 26_514_000 picoseconds. - Weight::from_parts(28_083_732, 109992) - // Standard Error: 104_905 - .saturating_add(Weight::from_parts(40_722_467, 0).saturating_mul(r.into())) + // Minimum execution time: 23_600_000 picoseconds. 
+ Weight::from_parts(25_001_426, 109992) + // Standard Error: 72_034 + .saturating_add(Weight::from_parts(37_851_873, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) - .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 109992).saturating_mul(r.into())) } - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(59), added: 2534, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(59), added: 2534, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) fn unlock() -> Weight { // Proof Size summary in bytes: - // Measured: `11734` + // Measured: `11800` // Estimated: `30706` - // Minimum execution time: 71_140_000 picoseconds. - Weight::from_parts(77_388_000, 30706) + // Minimum execution time: 66_247_000 picoseconds. + Weight::from_parts(67_552_000, 30706) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(59), added: 2534, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(59), added: 2534, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn vote_new() -> Weight { // Proof Size summary in bytes: - // Measured: `13074` + // Measured: `13141` // Estimated: `219984` - // Minimum execution time: 112_936_000 picoseconds. - Weight::from_parts(116_972_000, 219984) + // Minimum execution time: 102_539_000 picoseconds. 
+ Weight::from_parts(105_873_000, 219984) .saturating_add(RocksDbWeight::get().reads(7_u64)) - .saturating_add(RocksDbWeight::get().writes(6_u64)) + .saturating_add(RocksDbWeight::get().writes(7_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(59), added: 2534, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(59), added: 2534, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn vote_existing() -> Weight { // Proof Size summary in bytes: - // Measured: `20216` + // Measured: `20283` // Estimated: `219984` - // Minimum execution time: 291_971_000 picoseconds. - Weight::from_parts(301_738_000, 219984) + // Minimum execution time: 275_424_000 picoseconds. 
+ Weight::from_parts(283_690_000, 219984) .saturating_add(RocksDbWeight::get().reads(7_u64)) - .saturating_add(RocksDbWeight::get().writes(6_u64)) + .saturating_add(RocksDbWeight::get().writes(7_u64)) } - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn remove_vote() -> Weight { // Proof Size summary in bytes: - // Measured: `19968` + // Measured: `20035` // Estimated: `219984` - // Minimum execution time: 262_582_000 picoseconds. - Weight::from_parts(270_955_000, 219984) + // Minimum execution time: 275_109_000 picoseconds. + Weight::from_parts(281_315_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:0) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) fn remove_other_vote() -> Weight { // Proof Size summary in bytes: - // Measured: `12675` + // Measured: `12742` // Estimated: `30706` - // Minimum execution time: 52_909_000 picoseconds. - Weight::from_parts(56_365_000, 30706) + // Minimum execution time: 49_629_000 picoseconds. 
+ Weight::from_parts(51_300_000, 30706) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: ConvictionVoting VotingFor (r:2 w:2) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(59), added: 2534, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:2 w:2) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(59), added: 2534, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 1]`. fn delegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `240 + r * (1627 ±0)` + // Measured: `306 + r * (1628 ±0)` // Estimated: `109992 + r * (109992 ±0)` - // Minimum execution time: 54_640_000 picoseconds. - Weight::from_parts(57_185_281, 109992) - // Standard Error: 193_362 - .saturating_add(Weight::from_parts(44_897_418, 0).saturating_mul(r.into())) + // Minimum execution time: 45_776_000 picoseconds. 
+ Weight::from_parts(47_917_822, 109992) + // Standard Error: 124_174 + .saturating_add(Weight::from_parts(43_171_077, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) - .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(RocksDbWeight::get().writes((4_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 109992).saturating_mul(r.into())) } - /// Storage: ConvictionVoting VotingFor (r:2 w:2) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:2 w:2) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 1]`. fn undelegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `406 + r * (1376 ±0)` + // Measured: `472 + r * (1377 ±0)` // Estimated: `109992 + r * (109992 ±0)` - // Minimum execution time: 26_514_000 picoseconds. - Weight::from_parts(28_083_732, 109992) - // Standard Error: 104_905 - .saturating_add(Weight::from_parts(40_722_467, 0).saturating_mul(r.into())) + // Minimum execution time: 23_600_000 picoseconds. 
+ Weight::from_parts(25_001_426, 109992) + // Standard Error: 72_034 + .saturating_add(Weight::from_parts(37_851_873, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) - .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(RocksDbWeight::get().writes((4_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 109992).saturating_mul(r.into())) } - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(59), added: 2534, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(59), added: 2534, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) fn unlock() -> Weight { // Proof Size summary in bytes: - // Measured: `11734` + // Measured: `11800` // Estimated: `30706` - // Minimum execution time: 71_140_000 picoseconds. - Weight::from_parts(77_388_000, 30706) + // Minimum execution time: 66_247_000 picoseconds. + Weight::from_parts(67_552_000, 30706) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } diff --git a/substrate/frame/core-fellowship/src/weights.rs b/substrate/frame/core-fellowship/src/weights.rs index 8bbfd1a4dd81da2146994f668d722f1e8afa27b6..fce1d3747a8bb283c1ccaf5b8d7be1c113d39d92 100644 --- a/substrate/frame/core-fellowship/src/weights.rs +++ b/substrate/frame/core-fellowship/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_core_fellowship +//! Autogenerated weights for `pallet_core_fellowship` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/core-fellowship/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/core-fellowship/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_core_fellowship. +/// Weight functions needed for `pallet_core_fellowship`. pub trait WeightInfo { fn set_params() -> Weight; fn bump_offboard() -> Weight; @@ -64,336 +63,344 @@ pub trait WeightInfo { fn submit_evidence() -> Weight; } -/// Weights for pallet_core_fellowship using the Substrate node and recommended hardware. +/// Weights for `pallet_core_fellowship` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: CoreFellowship Params (r:0 w:1) - /// Proof: CoreFellowship Params (max_values: Some(1), max_size: Some(364), added: 859, mode: MaxEncodedLen) + /// Storage: `CoreFellowship::Params` (r:0 w:1) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) fn set_params() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_454_000 picoseconds. - Weight::from_parts(9_804_000, 0) + // Minimum execution time: 7_146_000 picoseconds. 
+ Weight::from_parts(7_426_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: CoreFellowship Params (r:1 w:0) - /// Proof: CoreFellowship Params (max_values: Some(1), max_size: Some(364), added: 859, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:1 w:0) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: CoreFellowship MemberEvidence (r:1 w:1) - /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Params` (r:1 w:0) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:1 w:1) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:1) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn bump_offboard() -> Weight { // Proof Size summary in bytes: - // Measured: `16887` + // Measured: `17274` // Estimated: `19894` - // Minimum execution time: 58_489_000 picoseconds. - Weight::from_parts(60_202_000, 19894) + // Minimum execution time: 54_511_000 picoseconds. 
+ Weight::from_parts(56_995_000, 19894) .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: CoreFellowship Params (r:1 w:0) - /// Proof: CoreFellowship Params (max_values: Some(1), max_size: Some(364), added: 859, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:1 w:0) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: CoreFellowship MemberEvidence (r:1 w:1) - /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Params` (r:1 w:0) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:1 w:1) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:1) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn bump_demote() -> Weight { // Proof Size summary in bytes: - // Measured: `16997` + // Measured: `17384` // Estimated: `19894` - // Minimum execution time: 60_605_000 picoseconds. - Weight::from_parts(63_957_000, 19894) + // Minimum execution time: 56_453_000 picoseconds. 
+ Weight::from_parts(59_030_000, 19894) .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn set_active() -> Weight { // Proof Size summary in bytes: // Measured: `388` // Estimated: `3514` - // Minimum execution time: 17_816_000 picoseconds. - Weight::from_parts(18_524_000, 3514) + // Minimum execution time: 15_940_000 picoseconds. + Weight::from_parts(16_381_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IndexToId (r:0 w:1) - /// Proof: RankedCollective IndexToId (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:0 w:1) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:1) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:0 w:1) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn induct() -> Weight { // Proof Size summary in bytes: // Measured: `146` // Estimated: `3514` - // Minimum execution time: 27_249_000 picoseconds. - Weight::from_parts(28_049_000, 3514) + // Minimum execution time: 24_193_000 picoseconds. 
+ Weight::from_parts(24_963_000, 3514) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: CoreFellowship Params (r:1 w:0) - /// Proof: CoreFellowship Params (max_values: Some(1), max_size: Some(364), added: 859, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: CoreFellowship MemberEvidence (r:1 w:1) - /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) - /// Storage: RankedCollective IndexToId (r:0 w:1) - /// Proof: RankedCollective IndexToId (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:0 w:1) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Params` (r:1 w:0) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:1) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:0 w:1) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn promote() -> Weight { // Proof Size summary in bytes: // Measured: `16865` // Estimated: `19894` - // Minimum execution time: 56_642_000 picoseconds. - Weight::from_parts(59_353_000, 19894) + // Minimum execution time: 48_138_000 picoseconds. 
+ Weight::from_parts(50_007_000, 19894) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: CoreFellowship MemberEvidence (r:0 w:1) - /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:0 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) fn offboard() -> Weight { // Proof Size summary in bytes: - // Measured: `359` + // Measured: `293` // Estimated: `3514` - // Minimum execution time: 17_459_000 picoseconds. - Weight::from_parts(18_033_000, 3514) + // Minimum execution time: 15_225_000 picoseconds. + Weight::from_parts(15_730_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) fn import() -> Weight { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3514` - // Minimum execution time: 16_728_000 picoseconds. - Weight::from_parts(17_263_000, 3514) + // Minimum execution time: 14_507_000 picoseconds. 
+ Weight::from_parts(14_935_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: CoreFellowship MemberEvidence (r:1 w:1) - /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) fn approve() -> Weight { // Proof Size summary in bytes: // Measured: `16843` // Estimated: `19894` - // Minimum execution time: 41_487_000 picoseconds. - Weight::from_parts(43_459_000, 19894) + // Minimum execution time: 34_050_000 picoseconds. + Weight::from_parts(36_323_000, 19894) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: CoreFellowship Member (r:1 w:0) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: CoreFellowship MemberEvidence (r:1 w:1) - /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) + /// Storage: `CoreFellowship::Member` (r:1 w:0) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) fn submit_evidence() -> Weight { // Proof Size summary in bytes: // Measured: `79` // Estimated: `19894` - // Minimum execution time: 26_033_000 picoseconds. - Weight::from_parts(26_612_000, 19894) + // Minimum execution time: 24_016_000 picoseconds. + Weight::from_parts(24_607_000, 19894) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: CoreFellowship Params (r:0 w:1) - /// Proof: CoreFellowship Params (max_values: Some(1), max_size: Some(364), added: 859, mode: MaxEncodedLen) + /// Storage: `CoreFellowship::Params` (r:0 w:1) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) fn set_params() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_454_000 picoseconds. - Weight::from_parts(9_804_000, 0) + // Minimum execution time: 7_146_000 picoseconds. 
+ Weight::from_parts(7_426_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: CoreFellowship Params (r:1 w:0) - /// Proof: CoreFellowship Params (max_values: Some(1), max_size: Some(364), added: 859, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:1 w:0) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: CoreFellowship MemberEvidence (r:1 w:1) - /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Params` (r:1 w:0) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:1 w:1) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:1) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn bump_offboard() -> Weight { // Proof Size summary in bytes: - // Measured: `16887` + // Measured: `17274` // Estimated: `19894` - // Minimum execution time: 58_489_000 picoseconds. - Weight::from_parts(60_202_000, 19894) + // Minimum execution time: 54_511_000 picoseconds. 
+ Weight::from_parts(56_995_000, 19894) .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: CoreFellowship Params (r:1 w:0) - /// Proof: CoreFellowship Params (max_values: Some(1), max_size: Some(364), added: 859, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:1 w:0) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: CoreFellowship MemberEvidence (r:1 w:1) - /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Params` (r:1 w:0) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:1 w:1) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:1) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn bump_demote() -> Weight { // Proof Size summary in bytes: - // Measured: `16997` + // Measured: `17384` // Estimated: `19894` - // Minimum execution time: 60_605_000 picoseconds. - Weight::from_parts(63_957_000, 19894) + // Minimum execution time: 56_453_000 picoseconds. 
+ Weight::from_parts(59_030_000, 19894) .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn set_active() -> Weight { // Proof Size summary in bytes: // Measured: `388` // Estimated: `3514` - // Minimum execution time: 17_816_000 picoseconds. - Weight::from_parts(18_524_000, 3514) + // Minimum execution time: 15_940_000 picoseconds. + Weight::from_parts(16_381_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IndexToId (r:0 w:1) - /// Proof: RankedCollective IndexToId (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:0 w:1) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:1) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:0 w:1) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn induct() -> Weight { // Proof Size summary in bytes: // Measured: `146` // Estimated: `3514` - // Minimum execution time: 27_249_000 picoseconds. - Weight::from_parts(28_049_000, 3514) + // Minimum execution time: 24_193_000 picoseconds. 
+ Weight::from_parts(24_963_000, 3514) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: CoreFellowship Params (r:1 w:0) - /// Proof: CoreFellowship Params (max_values: Some(1), max_size: Some(364), added: 859, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: CoreFellowship MemberEvidence (r:1 w:1) - /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) - /// Storage: RankedCollective IndexToId (r:0 w:1) - /// Proof: RankedCollective IndexToId (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:0 w:1) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Params` (r:1 w:0) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:1) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:0 w:1) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn promote() -> Weight { // Proof Size summary in bytes: // Measured: `16865` // Estimated: `19894` - // Minimum execution time: 56_642_000 picoseconds. - Weight::from_parts(59_353_000, 19894) + // Minimum execution time: 48_138_000 picoseconds. 
+ Weight::from_parts(50_007_000, 19894) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: CoreFellowship MemberEvidence (r:0 w:1) - /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:0 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) fn offboard() -> Weight { // Proof Size summary in bytes: - // Measured: `359` + // Measured: `293` // Estimated: `3514` - // Minimum execution time: 17_459_000 picoseconds. - Weight::from_parts(18_033_000, 3514) + // Minimum execution time: 15_225_000 picoseconds. + Weight::from_parts(15_730_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) fn import() -> Weight { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3514` - // Minimum execution time: 16_728_000 picoseconds. - Weight::from_parts(17_263_000, 3514) + // Minimum execution time: 14_507_000 picoseconds. 
+ Weight::from_parts(14_935_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: CoreFellowship MemberEvidence (r:1 w:1) - /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) fn approve() -> Weight { // Proof Size summary in bytes: // Measured: `16843` // Estimated: `19894` - // Minimum execution time: 41_487_000 picoseconds. - Weight::from_parts(43_459_000, 19894) + // Minimum execution time: 34_050_000 picoseconds. + Weight::from_parts(36_323_000, 19894) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: CoreFellowship Member (r:1 w:0) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: CoreFellowship MemberEvidence (r:1 w:1) - /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) + /// Storage: `CoreFellowship::Member` (r:1 w:0) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) fn submit_evidence() -> Weight { // Proof Size summary in bytes: // Measured: `79` // Estimated: `19894` - // Minimum execution time: 26_033_000 picoseconds. - Weight::from_parts(26_612_000, 19894) + // Minimum execution time: 24_016_000 picoseconds. + Weight::from_parts(24_607_000, 19894) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/democracy/src/lib.rs b/substrate/frame/democracy/src/lib.rs index 089556191cd14eee735d12976ba6b0ed471a4eac..08e2a7599f5542a8b87ce80df8d96b5cf4ee9af5 100644 --- a/substrate/frame/democracy/src/lib.rs +++ b/substrate/frame/democracy/src/lib.rs @@ -211,7 +211,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. 
const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/substrate/frame/democracy/src/weights.rs b/substrate/frame/democracy/src/weights.rs index 241f6c3cb38de2c270411cd2d39913f857bbe807..d6097922a82f2cfed77f974dcbaaa64a69c8d82d 100644 --- a/substrate/frame/democracy/src/weights.rs +++ b/substrate/frame/democracy/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_democracy +//! Autogenerated weights for `pallet_democracy` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/democracy/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/democracy/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_democracy. +/// Weight functions needed for `pallet_democracy`. pub trait WeightInfo { fn propose() -> Weight; fn second() -> Weight; @@ -82,904 +81,916 @@ pub trait WeightInfo { fn clear_referendum_metadata() -> Weight; } -/// Weights for pallet_democracy using the Substrate node and recommended hardware. +/// Weights for `pallet_democracy` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight<T>(PhantomData<T>); impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { - /// Storage: Democracy PublicPropCount (r:1 w:1) - /// Proof: Democracy PublicPropCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy PublicProps (r:1 w:1) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy Blacklist (r:1 w:0) - /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) - /// Storage: Democracy DepositOf (r:0 w:1) - /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen) + /// Storage: `Democracy::PublicPropCount` (r:1 w:1) + /// Proof: `Democracy::PublicPropCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Democracy::PublicProps` (r:1 w:1) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) + /// Storage: `Democracy::Blacklist` (r:1 w:0) + /// Proof: `Democracy::Blacklist` (`max_values`: None, `max_size`: Some(3238), added: 5713, mode: `MaxEncodedLen`) + /// Storage: `Democracy::DepositOf` (r:0 w:1) + /// Proof: `Democracy::DepositOf` (`max_values`: None, `max_size`: Some(3230), added: 5705, mode: `MaxEncodedLen`) fn propose() -> Weight { // Proof Size summary in bytes: - // Measured: `4801` + // Measured: `4834` // Estimated: `18187` - // Minimum execution time: 49_339_000 picoseconds. - Weight::from_parts(50_942_000, 18187) + // Minimum execution time: 39_930_000 picoseconds. + Weight::from_parts(41_746_000, 18187) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Democracy DepositOf (r:1 w:1) - /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen) + /// Storage: `Democracy::DepositOf` (r:1 w:1) + /// Proof: `Democracy::DepositOf` (`max_values`: None, `max_size`: Some(3230), added: 5705, mode: `MaxEncodedLen`) fn second() -> Weight { // Proof Size summary in bytes: - // Measured: `3556` + // Measured: `3589` // Estimated: `6695` - // Minimum execution time: 43_291_000 picoseconds. - Weight::from_parts(44_856_000, 6695) + // Minimum execution time: 36_490_000 picoseconds.
+ Weight::from_parts(37_615_000, 6695) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::VotingOf` (r:1 w:1) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) fn vote_new() -> Weight { // Proof Size summary in bytes: - // Measured: `3470` + // Measured: `3503` // Estimated: `7260` - // Minimum execution time: 61_890_000 picoseconds. - Weight::from_parts(63_626_000, 7260) + // Minimum execution time: 54_257_000 picoseconds. + Weight::from_parts(55_912_000, 7260) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::VotingOf` (r:1 w:1) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) fn vote_existing() -> Weight { // Proof Size summary in bytes: - // Measured: `3492` + // Measured: `3525` // Estimated: `7260` - // Minimum execution time: 67_802_000 picoseconds. - Weight::from_parts(69_132_000, 7260) + // Minimum execution time: 56_878_000 picoseconds. 
+ Weight::from_parts(58_796_000, 7260) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy Cancellations (r:1 w:1) - /// Proof: Democracy Cancellations (max_values: None, max_size: Some(33), added: 2508, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::Cancellations` (r:1 w:1) + /// Proof: `Democracy::Cancellations` (`max_values`: None, `max_size`: Some(33), added: 2508, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn emergency_cancel() -> Weight { // Proof Size summary in bytes: - // Measured: `366` + // Measured: `399` // Estimated: `3666` - // Minimum execution time: 25_757_000 picoseconds. - Weight::from_parts(27_226_000, 3666) + // Minimum execution time: 22_700_000 picoseconds. + Weight::from_parts(23_539_000, 3666) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Democracy PublicProps (r:1 w:1) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy DepositOf (r:1 w:1) - /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:3 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - /// Storage: Democracy NextExternal (r:1 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy Blacklist (r:0 w:1) - /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) + /// Storage: `Democracy::PublicProps` (r:1 w:1) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) + /// Storage: `Democracy::DepositOf` (r:1 w:1) + /// Proof: `Democracy::DepositOf` (`max_values`: None, `max_size`: Some(3230), added: 5705, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:3 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) + /// Storage: `Democracy::NextExternal` (r:1 w:1) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:1) + /// Proof: 
`Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::Blacklist` (r:0 w:1) + /// Proof: `Democracy::Blacklist` (`max_values`: None, `max_size`: Some(3238), added: 5713, mode: `MaxEncodedLen`) fn blacklist() -> Weight { // Proof Size summary in bytes: - // Measured: `5910` + // Measured: `5943` // Estimated: `18187` - // Minimum execution time: 113_060_000 picoseconds. - Weight::from_parts(114_813_000, 18187) + // Minimum execution time: 95_398_000 picoseconds. + Weight::from_parts(97_261_000, 18187) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } - /// Storage: Democracy NextExternal (r:1 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy Blacklist (r:1 w:0) - /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:1 w:1) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Democracy::Blacklist` (r:1 w:0) + /// Proof: `Democracy::Blacklist` (`max_values`: None, `max_size`: Some(3238), added: 5713, mode: `MaxEncodedLen`) fn external_propose() -> Weight { // Proof Size summary in bytes: - // Measured: `3416` + // Measured: `3449` // Estimated: `6703` - // Minimum execution time: 13_413_000 picoseconds. - Weight::from_parts(13_794_000, 6703) + // Minimum execution time: 11_745_000 picoseconds. + Weight::from_parts(12_304_000, 6703) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Democracy NextExternal (r:0 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:0 w:1) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) fn external_propose_majority() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_213_000 picoseconds. - Weight::from_parts(3_429_000, 0) + // Minimum execution time: 2_710_000 picoseconds. + Weight::from_parts(2_918_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Democracy NextExternal (r:0 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:0 w:1) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) fn external_propose_default() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_280_000 picoseconds. - Weight::from_parts(3_389_000, 0) + // Minimum execution time: 2_664_000 picoseconds. 
+ Weight::from_parts(2_776_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Democracy NextExternal (r:1 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumCount (r:1 w:1) - /// Proof: Democracy ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:2) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:0 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:1 w:1) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumCount` (r:1 w:1) + /// Proof: `Democracy::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:2) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:0 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) fn fast_track() -> Weight { // Proof Size summary in bytes: - // Measured: `286` + // Measured: `319` // Estimated: `3518` - // Minimum execution time: 28_142_000 picoseconds. - Weight::from_parts(28_862_000, 3518) + // Minimum execution time: 22_585_000 picoseconds. + Weight::from_parts(23_689_000, 3518) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Democracy NextExternal (r:1 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy Blacklist (r:1 w:1) - /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:1 w:1) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Democracy::Blacklist` (r:1 w:1) + /// Proof: `Democracy::Blacklist` (`max_values`: None, `max_size`: Some(3238), added: 5713, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn veto_external() -> Weight { // Proof Size summary in bytes: - // Measured: `3519` + // Measured: `3552` // Estimated: `6703` - // Minimum execution time: 32_395_000 picoseconds. - Weight::from_parts(33_617_000, 6703) + // Minimum execution time: 26_391_000 picoseconds. 
+ Weight::from_parts(27_141_000, 6703) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Democracy PublicProps (r:1 w:1) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy DepositOf (r:1 w:1) - /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::PublicProps` (r:1 w:1) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) + /// Storage: `Democracy::DepositOf` (r:1 w:1) + /// Proof: `Democracy::DepositOf` (`max_values`: None, `max_size`: Some(3230), added: 5705, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn cancel_proposal() -> Weight { // Proof Size summary in bytes: - // Measured: `5821` + // Measured: `5854` // Estimated: `18187` - // Minimum execution time: 92_255_000 picoseconds. - Weight::from_parts(93_704_000, 18187) + // Minimum execution time: 77_905_000 picoseconds. + Weight::from_parts(79_628_000, 18187) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:0 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:0 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) fn cancel_referendum() -> Weight { // Proof Size summary in bytes: - // Measured: `271` + // Measured: `304` // Estimated: `3518` - // Minimum execution time: 19_623_000 picoseconds. - Weight::from_parts(20_545_000, 3518) + // Minimum execution time: 15_735_000 picoseconds. 
+ Weight::from_parts(16_525_000, 3518) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Democracy LowestUnbaked (r:1 w:1) - /// Proof: Democracy LowestUnbaked (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumCount (r:1 w:0) - /// Proof: Democracy ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:99 w:0) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) + /// Storage: `Democracy::LowestUnbaked` (r:1 w:1) + /// Proof: `Democracy::LowestUnbaked` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumCount` (r:1 w:0) + /// Proof: `Democracy::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:99 w:0) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 99]`. fn on_initialize_base(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `244 + r * (86 ±0)` + // Measured: `277 + r * (86 ±0)` // Estimated: `1489 + r * (2676 ±0)` - // Minimum execution time: 7_032_000 picoseconds. - Weight::from_parts(7_931_421, 1489) - // Standard Error: 7_395 - .saturating_add(Weight::from_parts(3_236_964, 0).saturating_mul(r.into())) + // Minimum execution time: 5_274_000 picoseconds. + Weight::from_parts(6_162_399, 1489) + // Standard Error: 6_924 + .saturating_add(Weight::from_parts(3_186_702, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 2676).saturating_mul(r.into())) } - /// Storage: Democracy LowestUnbaked (r:1 w:1) - /// Proof: Democracy LowestUnbaked (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumCount (r:1 w:0) - /// Proof: Democracy ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy LastTabledWasExternal (r:1 w:0) - /// Proof: Democracy LastTabledWasExternal (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: Democracy NextExternal (r:1 w:0) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy PublicProps (r:1 w:0) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:99 w:0) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) + /// Storage: `Democracy::LowestUnbaked` (r:1 w:1) + /// Proof: `Democracy::LowestUnbaked` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumCount` (r:1 w:0) + /// Proof: `Democracy::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Democracy::LastTabledWasExternal` (r:1 w:0) + /// Proof: `Democracy::LastTabledWasExternal` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// 
Storage: `Democracy::NextExternal` (r:1 w:0) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Democracy::PublicProps` (r:1 w:0) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:99 w:0) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 99]`. fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `244 + r * (86 ±0)` + // Measured: `277 + r * (86 ±0)` // Estimated: `18187 + r * (2676 ±0)` - // Minimum execution time: 10_524_000 picoseconds. - Weight::from_parts(10_369_064, 18187) - // Standard Error: 8_385 - .saturating_add(Weight::from_parts(3_242_334, 0).saturating_mul(r.into())) + // Minimum execution time: 7_950_000 picoseconds. + Weight::from_parts(7_381_228, 18187) + // Standard Error: 6_650 + .saturating_add(Weight::from_parts(3_198_515, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 2676).saturating_mul(r.into())) } - /// Storage: Democracy VotingOf (r:3 w:3) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:99 w:99) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Democracy::VotingOf` (r:3 w:3) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:99 w:99) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 99]`. fn delegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `830 + r * (108 ±0)` + // Measured: `863 + r * (108 ±0)` // Estimated: `19800 + r * (2676 ±0)` - // Minimum execution time: 46_106_000 picoseconds. - Weight::from_parts(48_936_654, 19800) - // Standard Error: 8_879 - .saturating_add(Weight::from_parts(4_708_141, 0).saturating_mul(r.into())) + // Minimum execution time: 40_109_000 picoseconds. 
+ Weight::from_parts(43_164_384, 19800) + // Standard Error: 7_267 + .saturating_add(Weight::from_parts(4_161_526, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2676).saturating_mul(r.into())) } - /// Storage: Democracy VotingOf (r:2 w:2) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:99 w:99) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) + /// Storage: `Democracy::VotingOf` (r:2 w:2) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:99 w:99) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 99]`. fn undelegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `493 + r * (108 ±0)` + // Measured: `526 + r * (108 ±0)` // Estimated: `13530 + r * (2676 ±0)` - // Minimum execution time: 21_078_000 picoseconds. - Weight::from_parts(22_732_737, 13530) - // Standard Error: 7_969 - .saturating_add(Weight::from_parts(4_626_458, 0).saturating_mul(r.into())) + // Minimum execution time: 17_466_000 picoseconds. + Weight::from_parts(18_004_456, 13530) + // Standard Error: 6_327 + .saturating_add(Weight::from_parts(4_194_583, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2676).saturating_mul(r.into())) } - /// Storage: Democracy PublicProps (r:0 w:1) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) + /// Storage: `Democracy::PublicProps` (r:0 w:1) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) fn clear_public_proposals() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_229_000 picoseconds. - Weight::from_parts(3_415_000, 0) + // Minimum execution time: 2_824_000 picoseconds. 
+ Weight::from_parts(2_948_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Democracy::VotingOf` (r:1 w:1) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 99]`. fn unlock_remove(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `563` + // Measured: `596` // Estimated: `7260` - // Minimum execution time: 25_735_000 picoseconds. - Weight::from_parts(41_341_468, 7260) - // Standard Error: 3_727 - .saturating_add(Weight::from_parts(94_755, 0).saturating_mul(r.into())) + // Minimum execution time: 23_373_000 picoseconds. + Weight::from_parts(34_306_582, 7260) + // Standard Error: 2_849 + .saturating_add(Weight::from_parts(85_027, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Democracy::VotingOf` (r:1 w:1) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 99]`. fn unlock_set(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `564 + r * (22 ±0)` + // Measured: `597 + r * (22 ±0)` // Estimated: `7260` - // Minimum execution time: 36_233_000 picoseconds. 
- Weight::from_parts(39_836_017, 7260) - // Standard Error: 1_791 - .saturating_add(Weight::from_parts(132_158, 0).saturating_mul(r.into())) + // Minimum execution time: 31_574_000 picoseconds. + Weight::from_parts(33_906_658, 7260) + // Standard Error: 1_514 + .saturating_add(Weight::from_parts(124_471, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::VotingOf` (r:1 w:1) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 100]`. fn remove_vote(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `728 + r * (26 ±0)` + // Measured: `761 + r * (26 ±0)` // Estimated: `7260` - // Minimum execution time: 16_081_000 picoseconds. - Weight::from_parts(19_624_101, 7260) - // Standard Error: 1_639 - .saturating_add(Weight::from_parts(133_630, 0).saturating_mul(r.into())) + // Minimum execution time: 15_204_000 picoseconds. + Weight::from_parts(18_405_879, 7260) + // Standard Error: 1_851 + .saturating_add(Weight::from_parts(119_018, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::VotingOf` (r:1 w:1) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 100]`. fn remove_other_vote(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `728 + r * (26 ±0)` + // Measured: `761 + r * (26 ±0)` // Estimated: `7260` - // Minimum execution time: 15_634_000 picoseconds. - Weight::from_parts(19_573_407, 7260) - // Standard Error: 1_790 - .saturating_add(Weight::from_parts(139_707, 0).saturating_mul(r.into())) + // Minimum execution time: 15_120_000 picoseconds. 
+ Weight::from_parts(18_282_222, 7260) + // Standard Error: 1_669 + .saturating_add(Weight::from_parts(127_649, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Democracy NextExternal (r:1 w:0) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:0) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:0 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:1 w:0) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:0) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:0 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn set_external_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `356` + // Measured: `456` // Estimated: `3556` - // Minimum execution time: 18_344_000 picoseconds. - Weight::from_parts(18_727_000, 3556) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Minimum execution time: 17_351_000 picoseconds. + Weight::from_parts(17_964_000, 3556) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Democracy NextExternal (r:1 w:0) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:1 w:0) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn clear_external_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `286` + // Measured: `319` // Estimated: `3518` - // Minimum execution time: 16_497_000 picoseconds. - Weight::from_parts(16_892_000, 3518) + // Minimum execution time: 13_669_000 picoseconds. 
+ Weight::from_parts(14_410_000, 3518) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Democracy PublicProps (r:1 w:0) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:0) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:0 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::PublicProps` (r:1 w:0) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:0) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:0 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn set_proposal_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `4888` + // Measured: `4988` // Estimated: `18187` - // Minimum execution time: 39_517_000 picoseconds. - Weight::from_parts(40_632_000, 18187) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Minimum execution time: 39_162_000 picoseconds. + Weight::from_parts(40_109_000, 18187) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Democracy PublicProps (r:1 w:0) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::PublicProps` (r:1 w:0) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn clear_proposal_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `4822` + // Measured: `4855` // Estimated: `18187` - // Minimum execution time: 37_108_000 picoseconds. - Weight::from_parts(37_599_000, 18187) + // Minimum execution time: 34_141_000 picoseconds. 
+ Weight::from_parts(34_732_000, 18187) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Preimage StatusFor (r:1 w:0) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:0 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:0) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:0 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn set_referendum_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `144` + // Measured: `211` // Estimated: `3556` - // Minimum execution time: 13_997_000 picoseconds. - Weight::from_parts(14_298_000, 3556) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Minimum execution time: 13_413_000 picoseconds. + Weight::from_parts(14_039_000, 3556) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Democracy ReferendumInfoOf (r:1 w:0) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:0) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn clear_referendum_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `302` + // Measured: `335` // Estimated: `3666` - // Minimum execution time: 18_122_000 picoseconds. - Weight::from_parts(18_655_000, 3666) + // Minimum execution time: 16_010_000 picoseconds. + Weight::from_parts(16_474_000, 3666) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: Democracy PublicPropCount (r:1 w:1) - /// Proof: Democracy PublicPropCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy PublicProps (r:1 w:1) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy Blacklist (r:1 w:0) - /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) - /// Storage: Democracy DepositOf (r:0 w:1) - /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen) + /// Storage: `Democracy::PublicPropCount` (r:1 w:1) + /// Proof: `Democracy::PublicPropCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Democracy::PublicProps` (r:1 w:1) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) + /// Storage: `Democracy::Blacklist` (r:1 w:0) + /// Proof: `Democracy::Blacklist` (`max_values`: None, `max_size`: Some(3238), added: 5713, mode: `MaxEncodedLen`) + /// Storage: `Democracy::DepositOf` (r:0 w:1) + /// Proof: `Democracy::DepositOf` (`max_values`: None, `max_size`: Some(3230), added: 5705, mode: `MaxEncodedLen`) fn propose() -> Weight { // Proof Size summary in bytes: - // Measured: `4801` + // Measured: `4834` // Estimated: `18187` - // Minimum execution time: 49_339_000 picoseconds. - Weight::from_parts(50_942_000, 18187) + // Minimum execution time: 39_930_000 picoseconds. + Weight::from_parts(41_746_000, 18187) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Democracy DepositOf (r:1 w:1) - /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen) + /// Storage: `Democracy::DepositOf` (r:1 w:1) + /// Proof: `Democracy::DepositOf` (`max_values`: None, `max_size`: Some(3230), added: 5705, mode: `MaxEncodedLen`) fn second() -> Weight { // Proof Size summary in bytes: - // Measured: `3556` + // Measured: `3589` // Estimated: `6695` - // Minimum execution time: 43_291_000 picoseconds. - Weight::from_parts(44_856_000, 6695) + // Minimum execution time: 36_490_000 picoseconds. 
+ Weight::from_parts(37_615_000, 6695) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::VotingOf` (r:1 w:1) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) fn vote_new() -> Weight { // Proof Size summary in bytes: - // Measured: `3470` + // Measured: `3503` // Estimated: `7260` - // Minimum execution time: 61_890_000 picoseconds. - Weight::from_parts(63_626_000, 7260) + // Minimum execution time: 54_257_000 picoseconds. + Weight::from_parts(55_912_000, 7260) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::VotingOf` (r:1 w:1) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) fn vote_existing() -> Weight { // Proof Size summary in bytes: - // Measured: `3492` + // Measured: `3525` // Estimated: `7260` - // Minimum execution time: 67_802_000 picoseconds. - Weight::from_parts(69_132_000, 7260) + // Minimum execution time: 56_878_000 picoseconds. 
+ Weight::from_parts(58_796_000, 7260) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy Cancellations (r:1 w:1) - /// Proof: Democracy Cancellations (max_values: None, max_size: Some(33), added: 2508, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::Cancellations` (r:1 w:1) + /// Proof: `Democracy::Cancellations` (`max_values`: None, `max_size`: Some(33), added: 2508, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn emergency_cancel() -> Weight { // Proof Size summary in bytes: - // Measured: `366` + // Measured: `399` // Estimated: `3666` - // Minimum execution time: 25_757_000 picoseconds. - Weight::from_parts(27_226_000, 3666) + // Minimum execution time: 22_700_000 picoseconds. + Weight::from_parts(23_539_000, 3666) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Democracy PublicProps (r:1 w:1) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy DepositOf (r:1 w:1) - /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:3 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - /// Storage: Democracy NextExternal (r:1 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy Blacklist (r:0 w:1) - /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) + /// Storage: `Democracy::PublicProps` (r:1 w:1) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) + /// Storage: `Democracy::DepositOf` (r:1 w:1) + /// Proof: `Democracy::DepositOf` (`max_values`: None, `max_size`: Some(3230), added: 5705, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:3 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) + /// Storage: `Democracy::NextExternal` (r:1 w:1) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:1) + /// Proof: 
`Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::Blacklist` (r:0 w:1) + /// Proof: `Democracy::Blacklist` (`max_values`: None, `max_size`: Some(3238), added: 5713, mode: `MaxEncodedLen`) fn blacklist() -> Weight { // Proof Size summary in bytes: - // Measured: `5910` + // Measured: `5943` // Estimated: `18187` - // Minimum execution time: 113_060_000 picoseconds. - Weight::from_parts(114_813_000, 18187) + // Minimum execution time: 95_398_000 picoseconds. + Weight::from_parts(97_261_000, 18187) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } - /// Storage: Democracy NextExternal (r:1 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy Blacklist (r:1 w:0) - /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:1 w:1) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Democracy::Blacklist` (r:1 w:0) + /// Proof: `Democracy::Blacklist` (`max_values`: None, `max_size`: Some(3238), added: 5713, mode: `MaxEncodedLen`) fn external_propose() -> Weight { // Proof Size summary in bytes: - // Measured: `3416` + // Measured: `3449` // Estimated: `6703` - // Minimum execution time: 13_413_000 picoseconds. - Weight::from_parts(13_794_000, 6703) + // Minimum execution time: 11_745_000 picoseconds. + Weight::from_parts(12_304_000, 6703) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Democracy NextExternal (r:0 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:0 w:1) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) fn external_propose_majority() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_213_000 picoseconds. - Weight::from_parts(3_429_000, 0) + // Minimum execution time: 2_710_000 picoseconds. + Weight::from_parts(2_918_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Democracy NextExternal (r:0 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:0 w:1) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) fn external_propose_default() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_280_000 picoseconds. - Weight::from_parts(3_389_000, 0) + // Minimum execution time: 2_664_000 picoseconds. 
+ Weight::from_parts(2_776_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Democracy NextExternal (r:1 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumCount (r:1 w:1) - /// Proof: Democracy ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:2) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:0 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:1 w:1) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumCount` (r:1 w:1) + /// Proof: `Democracy::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:2) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:0 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) fn fast_track() -> Weight { // Proof Size summary in bytes: - // Measured: `286` + // Measured: `319` // Estimated: `3518` - // Minimum execution time: 28_142_000 picoseconds. - Weight::from_parts(28_862_000, 3518) + // Minimum execution time: 22_585_000 picoseconds. + Weight::from_parts(23_689_000, 3518) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Democracy NextExternal (r:1 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy Blacklist (r:1 w:1) - /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:1 w:1) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Democracy::Blacklist` (r:1 w:1) + /// Proof: `Democracy::Blacklist` (`max_values`: None, `max_size`: Some(3238), added: 5713, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn veto_external() -> Weight { // Proof Size summary in bytes: - // Measured: `3519` + // Measured: `3552` // Estimated: `6703` - // Minimum execution time: 32_395_000 picoseconds. - Weight::from_parts(33_617_000, 6703) + // Minimum execution time: 26_391_000 picoseconds. 
+ Weight::from_parts(27_141_000, 6703) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Democracy PublicProps (r:1 w:1) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy DepositOf (r:1 w:1) - /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::PublicProps` (r:1 w:1) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) + /// Storage: `Democracy::DepositOf` (r:1 w:1) + /// Proof: `Democracy::DepositOf` (`max_values`: None, `max_size`: Some(3230), added: 5705, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn cancel_proposal() -> Weight { // Proof Size summary in bytes: - // Measured: `5821` + // Measured: `5854` // Estimated: `18187` - // Minimum execution time: 92_255_000 picoseconds. - Weight::from_parts(93_704_000, 18187) + // Minimum execution time: 77_905_000 picoseconds. + Weight::from_parts(79_628_000, 18187) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:0 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:0 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) fn cancel_referendum() -> Weight { // Proof Size summary in bytes: - // Measured: `271` + // Measured: `304` // Estimated: `3518` - // Minimum execution time: 19_623_000 picoseconds. - Weight::from_parts(20_545_000, 3518) + // Minimum execution time: 15_735_000 picoseconds. 
+ Weight::from_parts(16_525_000, 3518) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Democracy LowestUnbaked (r:1 w:1) - /// Proof: Democracy LowestUnbaked (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumCount (r:1 w:0) - /// Proof: Democracy ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:99 w:0) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) + /// Storage: `Democracy::LowestUnbaked` (r:1 w:1) + /// Proof: `Democracy::LowestUnbaked` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumCount` (r:1 w:0) + /// Proof: `Democracy::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:99 w:0) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 99]`. fn on_initialize_base(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `244 + r * (86 ±0)` + // Measured: `277 + r * (86 ±0)` // Estimated: `1489 + r * (2676 ±0)` - // Minimum execution time: 7_032_000 picoseconds. - Weight::from_parts(7_931_421, 1489) - // Standard Error: 7_395 - .saturating_add(Weight::from_parts(3_236_964, 0).saturating_mul(r.into())) + // Minimum execution time: 5_274_000 picoseconds. + Weight::from_parts(6_162_399, 1489) + // Standard Error: 6_924 + .saturating_add(Weight::from_parts(3_186_702, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 2676).saturating_mul(r.into())) } - /// Storage: Democracy LowestUnbaked (r:1 w:1) - /// Proof: Democracy LowestUnbaked (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumCount (r:1 w:0) - /// Proof: Democracy ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy LastTabledWasExternal (r:1 w:0) - /// Proof: Democracy LastTabledWasExternal (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: Democracy NextExternal (r:1 w:0) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy PublicProps (r:1 w:0) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:99 w:0) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) + /// Storage: `Democracy::LowestUnbaked` (r:1 w:1) + /// Proof: `Democracy::LowestUnbaked` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumCount` (r:1 w:0) + /// Proof: `Democracy::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Democracy::LastTabledWasExternal` (r:1 w:0) + /// Proof: `Democracy::LastTabledWasExternal` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: 
`MaxEncodedLen`) + /// Storage: `Democracy::NextExternal` (r:1 w:0) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Democracy::PublicProps` (r:1 w:0) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:99 w:0) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 99]`. fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `244 + r * (86 ±0)` + // Measured: `277 + r * (86 ±0)` // Estimated: `18187 + r * (2676 ±0)` - // Minimum execution time: 10_524_000 picoseconds. - Weight::from_parts(10_369_064, 18187) - // Standard Error: 8_385 - .saturating_add(Weight::from_parts(3_242_334, 0).saturating_mul(r.into())) + // Minimum execution time: 7_950_000 picoseconds. + Weight::from_parts(7_381_228, 18187) + // Standard Error: 6_650 + .saturating_add(Weight::from_parts(3_198_515, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 2676).saturating_mul(r.into())) } - /// Storage: Democracy VotingOf (r:3 w:3) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:99 w:99) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Democracy::VotingOf` (r:3 w:3) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:99 w:99) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 99]`. fn delegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `830 + r * (108 ±0)` + // Measured: `863 + r * (108 ±0)` // Estimated: `19800 + r * (2676 ±0)` - // Minimum execution time: 46_106_000 picoseconds. - Weight::from_parts(48_936_654, 19800) - // Standard Error: 8_879 - .saturating_add(Weight::from_parts(4_708_141, 0).saturating_mul(r.into())) + // Minimum execution time: 40_109_000 picoseconds. 
+ Weight::from_parts(43_164_384, 19800) + // Standard Error: 7_267 + .saturating_add(Weight::from_parts(4_161_526, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2676).saturating_mul(r.into())) } - /// Storage: Democracy VotingOf (r:2 w:2) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:99 w:99) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) + /// Storage: `Democracy::VotingOf` (r:2 w:2) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:99 w:99) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 99]`. fn undelegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `493 + r * (108 ±0)` + // Measured: `526 + r * (108 ±0)` // Estimated: `13530 + r * (2676 ±0)` - // Minimum execution time: 21_078_000 picoseconds. - Weight::from_parts(22_732_737, 13530) - // Standard Error: 7_969 - .saturating_add(Weight::from_parts(4_626_458, 0).saturating_mul(r.into())) + // Minimum execution time: 17_466_000 picoseconds. + Weight::from_parts(18_004_456, 13530) + // Standard Error: 6_327 + .saturating_add(Weight::from_parts(4_194_583, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2676).saturating_mul(r.into())) } - /// Storage: Democracy PublicProps (r:0 w:1) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) + /// Storage: `Democracy::PublicProps` (r:0 w:1) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) fn clear_public_proposals() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_229_000 picoseconds. - Weight::from_parts(3_415_000, 0) + // Minimum execution time: 2_824_000 picoseconds. 
+ Weight::from_parts(2_948_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Democracy::VotingOf` (r:1 w:1) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 99]`. fn unlock_remove(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `563` + // Measured: `596` // Estimated: `7260` - // Minimum execution time: 25_735_000 picoseconds. - Weight::from_parts(41_341_468, 7260) - // Standard Error: 3_727 - .saturating_add(Weight::from_parts(94_755, 0).saturating_mul(r.into())) + // Minimum execution time: 23_373_000 picoseconds. + Weight::from_parts(34_306_582, 7260) + // Standard Error: 2_849 + .saturating_add(Weight::from_parts(85_027, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Democracy::VotingOf` (r:1 w:1) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 99]`. fn unlock_set(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `564 + r * (22 ±0)` + // Measured: `597 + r * (22 ±0)` // Estimated: `7260` - // Minimum execution time: 36_233_000 picoseconds. 
- Weight::from_parts(39_836_017, 7260) - // Standard Error: 1_791 - .saturating_add(Weight::from_parts(132_158, 0).saturating_mul(r.into())) + // Minimum execution time: 31_574_000 picoseconds. + Weight::from_parts(33_906_658, 7260) + // Standard Error: 1_514 + .saturating_add(Weight::from_parts(124_471, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::VotingOf` (r:1 w:1) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 100]`. fn remove_vote(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `728 + r * (26 ±0)` + // Measured: `761 + r * (26 ±0)` // Estimated: `7260` - // Minimum execution time: 16_081_000 picoseconds. - Weight::from_parts(19_624_101, 7260) - // Standard Error: 1_639 - .saturating_add(Weight::from_parts(133_630, 0).saturating_mul(r.into())) + // Minimum execution time: 15_204_000 picoseconds. + Weight::from_parts(18_405_879, 7260) + // Standard Error: 1_851 + .saturating_add(Weight::from_parts(119_018, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::VotingOf` (r:1 w:1) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 100]`. fn remove_other_vote(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `728 + r * (26 ±0)` + // Measured: `761 + r * (26 ±0)` // Estimated: `7260` - // Minimum execution time: 15_634_000 picoseconds. - Weight::from_parts(19_573_407, 7260) - // Standard Error: 1_790 - .saturating_add(Weight::from_parts(139_707, 0).saturating_mul(r.into())) + // Minimum execution time: 15_120_000 picoseconds. 
+ Weight::from_parts(18_282_222, 7260) + // Standard Error: 1_669 + .saturating_add(Weight::from_parts(127_649, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Democracy NextExternal (r:1 w:0) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:0) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:0 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:1 w:0) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:0) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:0 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn set_external_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `356` + // Measured: `456` // Estimated: `3556` - // Minimum execution time: 18_344_000 picoseconds. - Weight::from_parts(18_727_000, 3556) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Minimum execution time: 17_351_000 picoseconds. + Weight::from_parts(17_964_000, 3556) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Democracy NextExternal (r:1 w:0) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:1 w:0) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn clear_external_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `286` + // Measured: `319` // Estimated: `3518` - // Minimum execution time: 16_497_000 picoseconds. - Weight::from_parts(16_892_000, 3518) + // Minimum execution time: 13_669_000 picoseconds. 
+ Weight::from_parts(14_410_000, 3518) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Democracy PublicProps (r:1 w:0) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:0) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:0 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::PublicProps` (r:1 w:0) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:0) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:0 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn set_proposal_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `4888` + // Measured: `4988` // Estimated: `18187` - // Minimum execution time: 39_517_000 picoseconds. - Weight::from_parts(40_632_000, 18187) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Minimum execution time: 39_162_000 picoseconds. + Weight::from_parts(40_109_000, 18187) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Democracy PublicProps (r:1 w:0) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::PublicProps` (r:1 w:0) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn clear_proposal_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `4822` + // Measured: `4855` // Estimated: `18187` - // Minimum execution time: 37_108_000 picoseconds. - Weight::from_parts(37_599_000, 18187) + // Minimum execution time: 34_141_000 picoseconds. 
+ Weight::from_parts(34_732_000, 18187) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Preimage StatusFor (r:1 w:0) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:0 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:0) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:0 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn set_referendum_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `144` + // Measured: `211` // Estimated: `3556` - // Minimum execution time: 13_997_000 picoseconds. - Weight::from_parts(14_298_000, 3556) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Minimum execution time: 13_413_000 picoseconds. + Weight::from_parts(14_039_000, 3556) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Democracy ReferendumInfoOf (r:1 w:0) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:0) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn clear_referendum_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `302` + // Measured: `335` // Estimated: `3666` - // Minimum execution time: 18_122_000 picoseconds. - Weight::from_parts(18_655_000, 3666) + // Minimum execution time: 16_010_000 picoseconds. + Weight::from_parts(16_474_000, 3666) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/election-provider-multi-phase/src/lib.rs b/substrate/frame/election-provider-multi-phase/src/lib.rs index 4f43f89abed222f963156c2f5afaebb8288bf45a..6bf4dfe4f1eb993e35a99a34bd21b2bde81d4355 100644 --- a/substrate/frame/election-provider-multi-phase/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/src/lib.rs @@ -1343,7 +1343,7 @@ pub mod pallet { #[pallet::getter(fn minimum_untrusted_score)] pub type MinimumUntrustedScore = StorageValue<_, ElectionScore>; - /// The current storage version. + /// The in-code storage version. 
/// /// v1: https://github.com/paritytech/substrate/pull/12237/ const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); diff --git a/substrate/frame/election-provider-multi-phase/src/migrations.rs b/substrate/frame/election-provider-multi-phase/src/migrations.rs index 50b821e6db6ae8c7a2cc68192f9dd8cb39e9f460..156f1c02e27cd23e3e37014650fdff66115cd18f 100644 --- a/substrate/frame/election-provider-multi-phase/src/migrations.rs +++ b/substrate/frame/election-provider-multi-phase/src/migrations.rs @@ -27,12 +27,12 @@ pub mod v1 { pub struct MigrateToV1<T>(sp_std::marker::PhantomData<T>); impl<T: Config> OnRuntimeUpgrade for MigrateToV1<T> { fn on_runtime_upgrade() -> Weight { - let current = Pallet::<T>::current_storage_version(); + let current = Pallet::<T>::in_code_storage_version(); let onchain = Pallet::<T>::on_chain_storage_version(); log!( info, - "Running migration with current storage version {:?} / onchain {:?}", + "Running migration with in-code storage version {:?} / onchain {:?}", current, onchain ); diff --git a/substrate/frame/election-provider-multi-phase/src/mock.rs b/substrate/frame/election-provider-multi-phase/src/mock.rs index 18dcd7061c1fc35c7cc64134c33363b746e9baf8..312dc5570900beb19812e5d7667804568997a7a2 100644 --- a/substrate/frame/election-provider-multi-phase/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/src/mock.rs @@ -441,7 +441,7 @@ where type Extrinsic = Extrinsic; } -pub type Extrinsic = sp_runtime::testing::TestXt; +pub type Extrinsic = sp_runtime::generic::UncheckedExtrinsic; parameter_types! { pub MaxNominations: u32 = ::LIMIT as u32; diff --git a/substrate/frame/election-provider-multi-phase/src/unsigned.rs b/substrate/frame/election-provider-multi-phase/src/unsigned.rs index 94348181334061a3c2de81b90742f53c4390ddc9..94cfd059b6c5b05a5add5c08cd946d011d7085ad 100644 --- a/substrate/frame/election-provider-multi-phase/src/unsigned.rs +++ b/substrate/frame/election-provider-multi-phase/src/unsigned.rs @@ -1813,7 +1813,7 @@ mod tests { let encoded = pool.read().transactions[0].clone(); let extrinsic: Extrinsic = codec::Decode::decode(&mut &*encoded).unwrap(); - let call = extrinsic.call; + let call = extrinsic.function; assert!(matches!(call, RuntimeCall::MultiPhase(Call::submit_unsigned { .. }))); }) } @@ -1830,7 +1830,7 @@ mod tests { let encoded = pool.read().transactions[0].clone(); let extrinsic = Extrinsic::decode(&mut &*encoded).unwrap(); - let call = match extrinsic.call { + let call = match extrinsic.function { RuntimeCall::MultiPhase(call @ Call::submit_unsigned { .. }) => call, _ => panic!("bad call: unexpected submission"), }; diff --git a/substrate/frame/election-provider-multi-phase/src/weights.rs b/substrate/frame/election-provider-multi-phase/src/weights.rs index be578fac8c435769de3b030a4c7fb20832786b83..ed3e942716e265837dd0021b917222b1d3eba4aa 100644 --- a/substrate/frame/election-provider-multi-phase/src/weights.rs +++ b/substrate/frame/election-provider-multi-phase/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_election_provider_multi_phase +//! Autogenerated weights for `pallet_election_provider_multi_phase` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//!
DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/election-provider-multi-phase/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/election-provider-multi-phase/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_election_provider_multi_phase. +/// Weight functions needed for `pallet_election_provider_multi_phase`. pub trait WeightInfo { fn on_initialize_nothing() -> Weight; fn on_initialize_open_signed() -> Weight; @@ -64,169 +63,171 @@ pub trait WeightInfo { fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight; } -/// Weights for pallet_election_provider_multi_phase using the Substrate node and recommended hardware. +/// Weights for `pallet_election_provider_multi_phase` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CurrentPlannedSession (r:1 w:0) - /// Proof: Staking CurrentPlannedSession (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ErasStartSessionIndex (r:1 w:0) - /// Proof: Staking ErasStartSessionIndex (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Babe EpochIndex (r:1 w:0) - /// Proof: Babe EpochIndex (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Babe GenesisSlot (r:1 w:0) - /// Proof: Babe GenesisSlot (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Babe CurrentSlot (r:1 w:0) - /// Proof: Babe CurrentSlot (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Staking ForceEra (r:1 w:0) - /// Proof: Staking ForceEra (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentPlannedSession` (r:1 w:0) + /// Proof: `Staking::CurrentPlannedSession` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStartSessionIndex` (r:1 w:0) + /// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Babe::EpochIndex` (r:1 w:0) + /// Proof: `Babe::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Babe::GenesisSlot` (r:1 w:0) + /// Proof: `Babe::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Babe::CurrentSlot` (r:1 w:0) + /// Proof: `Babe::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Staking::ForceEra` (r:1 w:0) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn on_initialize_nothing() -> Weight { // Proof Size summary in bytes: - // Measured: `1028` + // Measured: `1061` // Estimated: `3481` - // Minimum execution time: 22_089_000 picoseconds. - Weight::from_parts(22_677_000, 3481) + // Minimum execution time: 19_340_000 picoseconds. 
+ Weight::from_parts(19_886_000, 3481) .saturating_add(T::DbWeight::get().reads(8_u64)) } - /// Storage: ElectionProviderMultiPhase Round (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase Round (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Round` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn on_initialize_open_signed() -> Weight { // Proof Size summary in bytes: // Measured: `148` // Estimated: `1633` - // Minimum execution time: 11_986_000 picoseconds. - Weight::from_parts(12_445_000, 1633) + // Minimum execution time: 8_067_000 picoseconds. + Weight::from_parts(8_508_000, 1633) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: ElectionProviderMultiPhase Round (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase Round (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Round` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn on_initialize_open_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `148` // Estimated: `1633` - // Minimum execution time: 12_988_000 picoseconds. - Weight::from_parts(13_281_000, 1633) + // Minimum execution time: 8_810_000 picoseconds. + Weight::from_parts(9_061_000, 1633) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: ElectionProviderMultiPhase QueuedSolution (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase QueuedSolution (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ElectionProviderMultiPhase::QueuedSolution` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::QueuedSolution` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn finalize_signed_phase_accept_solution() -> Weight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 32_659_000 picoseconds. - Weight::from_parts(33_281_000, 3593) + // Minimum execution time: 24_339_000 picoseconds. 
+ Weight::from_parts(25_322_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn finalize_signed_phase_reject_solution() -> Weight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 22_471_000 picoseconds. - Weight::from_parts(23_046_000, 3593) + // Minimum execution time: 16_635_000 picoseconds. + Weight::from_parts(17_497_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: ElectionProviderMultiPhase SnapshotMetadata (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SnapshotMetadata (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase DesiredTargets (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase Snapshot (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::SnapshotMetadata` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::SnapshotMetadata` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::DesiredTargets` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::DesiredTargets` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Snapshot` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::Snapshot` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `v` is `[1000, 2000]`. /// The range of component `t` is `[500, 1000]`. fn create_snapshot_internal(v: u32, _t: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 262_360_000 picoseconds. - Weight::from_parts(279_313_000, 0) - // Standard Error: 2_384 - .saturating_add(Weight::from_parts(176_415, 0).saturating_mul(v.into())) + // Minimum execution time: 170_730_000 picoseconds. 
+ Weight::from_parts(175_009_000, 0) + // Standard Error: 2_010 + .saturating_add(Weight::from_parts(224_974, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionIndices (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionNextIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SnapshotMetadata (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SignedSubmissionsMap (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionsMap (max_values: None, max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase QueuedSolution (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase QueuedSolution (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Round (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase Round (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase DesiredTargets (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase Snapshot (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::SignedSubmissionIndices` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::SignedSubmissionIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SignedSubmissionNextIndex` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::SignedSubmissionNextIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SnapshotMetadata` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::SnapshotMetadata` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SignedSubmissionsMap` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::SignedSubmissionsMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::QueuedSolution` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::QueuedSolution` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::Round` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::DesiredTargets` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::DesiredTargets` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Snapshot` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::Snapshot` (`max_values`: Some(1), `max_size`: 
None, mode: `Measured`) /// The range of component `a` is `[500, 800]`. /// The range of component `d` is `[200, 400]`. fn elect_queued(a: u32, d: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `371 + a * (768 ±0) + d * (48 ±0)` // Estimated: `3923 + a * (768 ±0) + d * (49 ±0)` - // Minimum execution time: 301_283_000 picoseconds. - Weight::from_parts(324_586_000, 3923) - // Standard Error: 4_763 - .saturating_add(Weight::from_parts(279_812, 0).saturating_mul(a.into())) + // Minimum execution time: 280_705_000 picoseconds. + Weight::from_parts(303_018_000, 3923) + // Standard Error: 4_633 + .saturating_add(Weight::from_parts(307_274, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(8_u64)) .saturating_add(Weight::from_parts(0, 768).saturating_mul(a.into())) .saturating_add(Weight::from_parts(0, 49).saturating_mul(d.into())) } - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase SnapshotMetadata (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) - /// Proof: TransactionPayment NextFeeMultiplier (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionIndices (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionNextIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SignedSubmissionsMap (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionsMap (max_values: None, max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Round` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SnapshotMetadata` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::SnapshotMetadata` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SignedSubmissionIndices` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::SignedSubmissionIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SignedSubmissionNextIndex` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::SignedSubmissionNextIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `ElectionProviderMultiPhase::SignedSubmissionsMap` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::SignedSubmissionsMap` (`max_values`: None, `max_size`: None, mode: `Measured`) fn submit() -> Weight { // Proof Size summary in bytes: // Measured: `927` // Estimated: `2412` - // Minimum 
execution time: 52_276_000 picoseconds. - Weight::from_parts(53_846_000, 2412) - .saturating_add(T::DbWeight::get().reads(5_u64)) + // Minimum execution time: 43_405_000 picoseconds. + Weight::from_parts(45_734_000, 2412) + .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Round (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase Round (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase DesiredTargets (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase QueuedSolution (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase QueuedSolution (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase SnapshotMetadata (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase Snapshot (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase MinimumUntrustedScore (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Round` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::DesiredTargets` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::DesiredTargets` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::QueuedSolution` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::QueuedSolution` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SnapshotMetadata` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::SnapshotMetadata` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Snapshot` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Snapshot` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::MinimumUntrustedScore` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::MinimumUntrustedScore` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `v` is `[1000, 2000]`. /// The range of component `t` is `[500, 1000]`. /// The range of component `a` is `[500, 800]`. @@ -235,25 +236,25 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `253 + t * (32 ±0) + v * (553 ±0)` // Estimated: `1738 + t * (32 ±0) + v * (553 ±0)` - // Minimum execution time: 5_448_459_000 picoseconds. 
- Weight::from_parts(5_525_622_000, 1738) - // Standard Error: 21_478 - .saturating_add(Weight::from_parts(256_345, 0).saturating_mul(v.into())) - // Standard Error: 63_648 - .saturating_add(Weight::from_parts(5_103_224, 0).saturating_mul(a.into())) + // Minimum execution time: 5_059_092_000 picoseconds. + Weight::from_parts(5_263_076_000, 1738) + // Standard Error: 17_317 + .saturating_add(Weight::from_parts(384_051, 0).saturating_mul(v.into())) + // Standard Error: 51_319 + .saturating_add(Weight::from_parts(4_095_128, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(t.into())) .saturating_add(Weight::from_parts(0, 553).saturating_mul(v.into())) } - /// Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase DesiredTargets (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase Snapshot (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Round (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase Round (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase MinimumUntrustedScore (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::DesiredTargets` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::DesiredTargets` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Snapshot` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Snapshot` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Round` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::MinimumUntrustedScore` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::MinimumUntrustedScore` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `v` is `[1000, 2000]`. /// The range of component `t` is `[500, 1000]`. /// The range of component `a` is `[500, 800]`. @@ -262,180 +263,182 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `228 + t * (32 ±0) + v * (553 ±0)` // Estimated: `1713 + t * (32 ±0) + v * (553 ±0)` - // Minimum execution time: 4_724_399_000 picoseconds. - Weight::from_parts(4_886_472_000, 1713) - // Standard Error: 15_220 - .saturating_add(Weight::from_parts(365_569, 0).saturating_mul(v.into())) - // Standard Error: 45_104 - .saturating_add(Weight::from_parts(3_176_675, 0).saturating_mul(a.into())) + // Minimum execution time: 4_426_416_000 picoseconds. + Weight::from_parts(4_466_923_000, 1713) + // Standard Error: 15_415 + .saturating_add(Weight::from_parts(334_047, 0).saturating_mul(v.into())) + // Standard Error: 45_682 + .saturating_add(Weight::from_parts(3_097_318, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(t.into())) .saturating_add(Weight::from_parts(0, 553).saturating_mul(v.into())) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CurrentPlannedSession (r:1 w:0) - /// Proof: Staking CurrentPlannedSession (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ErasStartSessionIndex (r:1 w:0) - /// Proof: Staking ErasStartSessionIndex (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Babe EpochIndex (r:1 w:0) - /// Proof: Babe EpochIndex (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Babe GenesisSlot (r:1 w:0) - /// Proof: Babe GenesisSlot (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Babe CurrentSlot (r:1 w:0) - /// Proof: Babe CurrentSlot (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Staking ForceEra (r:1 w:0) - /// Proof: Staking ForceEra (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentPlannedSession` (r:1 w:0) + /// Proof: `Staking::CurrentPlannedSession` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStartSessionIndex` (r:1 w:0) + /// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Babe::EpochIndex` (r:1 w:0) + /// Proof: `Babe::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Babe::GenesisSlot` (r:1 w:0) + /// Proof: `Babe::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Babe::CurrentSlot` (r:1 w:0) + /// Proof: `Babe::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Staking::ForceEra` (r:1 w:0) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn on_initialize_nothing() -> Weight { // Proof Size summary in bytes: - // Measured: `1028` + // Measured: `1061` // Estimated: `3481` - // Minimum execution time: 22_089_000 picoseconds. - Weight::from_parts(22_677_000, 3481) + // Minimum execution time: 19_340_000 picoseconds. 
+ Weight::from_parts(19_886_000, 3481) .saturating_add(RocksDbWeight::get().reads(8_u64)) } - /// Storage: ElectionProviderMultiPhase Round (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase Round (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Round` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn on_initialize_open_signed() -> Weight { // Proof Size summary in bytes: // Measured: `148` // Estimated: `1633` - // Minimum execution time: 11_986_000 picoseconds. - Weight::from_parts(12_445_000, 1633) + // Minimum execution time: 8_067_000 picoseconds. + Weight::from_parts(8_508_000, 1633) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: ElectionProviderMultiPhase Round (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase Round (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Round` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn on_initialize_open_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `148` // Estimated: `1633` - // Minimum execution time: 12_988_000 picoseconds. - Weight::from_parts(13_281_000, 1633) + // Minimum execution time: 8_810_000 picoseconds. + Weight::from_parts(9_061_000, 1633) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: ElectionProviderMultiPhase QueuedSolution (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase QueuedSolution (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ElectionProviderMultiPhase::QueuedSolution` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::QueuedSolution` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn finalize_signed_phase_accept_solution() -> Weight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 32_659_000 picoseconds. - Weight::from_parts(33_281_000, 3593) + // Minimum execution time: 24_339_000 picoseconds. 
+ Weight::from_parts(25_322_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn finalize_signed_phase_reject_solution() -> Weight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 22_471_000 picoseconds. - Weight::from_parts(23_046_000, 3593) + // Minimum execution time: 16_635_000 picoseconds. + Weight::from_parts(17_497_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: ElectionProviderMultiPhase SnapshotMetadata (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SnapshotMetadata (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase DesiredTargets (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase Snapshot (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::SnapshotMetadata` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::SnapshotMetadata` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::DesiredTargets` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::DesiredTargets` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Snapshot` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::Snapshot` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `v` is `[1000, 2000]`. /// The range of component `t` is `[500, 1000]`. fn create_snapshot_internal(v: u32, _t: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 262_360_000 picoseconds. - Weight::from_parts(279_313_000, 0) - // Standard Error: 2_384 - .saturating_add(Weight::from_parts(176_415, 0).saturating_mul(v.into())) + // Minimum execution time: 170_730_000 picoseconds. 
+ Weight::from_parts(175_009_000, 0) + // Standard Error: 2_010 + .saturating_add(Weight::from_parts(224_974, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionIndices (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionNextIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SnapshotMetadata (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SignedSubmissionsMap (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionsMap (max_values: None, max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase QueuedSolution (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase QueuedSolution (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Round (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase Round (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase DesiredTargets (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase Snapshot (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::SignedSubmissionIndices` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::SignedSubmissionIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SignedSubmissionNextIndex` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::SignedSubmissionNextIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SnapshotMetadata` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::SnapshotMetadata` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SignedSubmissionsMap` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::SignedSubmissionsMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::QueuedSolution` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::QueuedSolution` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::Round` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::DesiredTargets` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::DesiredTargets` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Snapshot` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::Snapshot` (`max_values`: Some(1), `max_size`: 
None, mode: `Measured`) /// The range of component `a` is `[500, 800]`. /// The range of component `d` is `[200, 400]`. fn elect_queued(a: u32, d: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `371 + a * (768 ±0) + d * (48 ±0)` // Estimated: `3923 + a * (768 ±0) + d * (49 ±0)` - // Minimum execution time: 301_283_000 picoseconds. - Weight::from_parts(324_586_000, 3923) - // Standard Error: 4_763 - .saturating_add(Weight::from_parts(279_812, 0).saturating_mul(a.into())) + // Minimum execution time: 280_705_000 picoseconds. + Weight::from_parts(303_018_000, 3923) + // Standard Error: 4_633 + .saturating_add(Weight::from_parts(307_274, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(8_u64)) .saturating_add(Weight::from_parts(0, 768).saturating_mul(a.into())) .saturating_add(Weight::from_parts(0, 49).saturating_mul(d.into())) } - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase SnapshotMetadata (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) - /// Proof: TransactionPayment NextFeeMultiplier (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionIndices (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionNextIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SignedSubmissionsMap (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionsMap (max_values: None, max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Round` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SnapshotMetadata` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::SnapshotMetadata` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SignedSubmissionIndices` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::SignedSubmissionIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SignedSubmissionNextIndex` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::SignedSubmissionNextIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `ElectionProviderMultiPhase::SignedSubmissionsMap` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::SignedSubmissionsMap` (`max_values`: None, `max_size`: None, mode: `Measured`) fn submit() -> Weight { // Proof Size summary in bytes: // Measured: `927` // Estimated: `2412` - // Minimum 
execution time: 52_276_000 picoseconds. - Weight::from_parts(53_846_000, 2412) - .saturating_add(RocksDbWeight::get().reads(5_u64)) + // Minimum execution time: 43_405_000 picoseconds. + Weight::from_parts(45_734_000, 2412) + .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Round (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase Round (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase DesiredTargets (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase QueuedSolution (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase QueuedSolution (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase SnapshotMetadata (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase Snapshot (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase MinimumUntrustedScore (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Round` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::DesiredTargets` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::DesiredTargets` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::QueuedSolution` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::QueuedSolution` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SnapshotMetadata` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::SnapshotMetadata` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Snapshot` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Snapshot` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::MinimumUntrustedScore` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::MinimumUntrustedScore` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `v` is `[1000, 2000]`. /// The range of component `t` is `[500, 1000]`. /// The range of component `a` is `[500, 800]`. @@ -444,25 +447,25 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `253 + t * (32 ±0) + v * (553 ±0)` // Estimated: `1738 + t * (32 ±0) + v * (553 ±0)` - // Minimum execution time: 5_448_459_000 picoseconds. 
- Weight::from_parts(5_525_622_000, 1738) - // Standard Error: 21_478 - .saturating_add(Weight::from_parts(256_345, 0).saturating_mul(v.into())) - // Standard Error: 63_648 - .saturating_add(Weight::from_parts(5_103_224, 0).saturating_mul(a.into())) + // Minimum execution time: 5_059_092_000 picoseconds. + Weight::from_parts(5_263_076_000, 1738) + // Standard Error: 17_317 + .saturating_add(Weight::from_parts(384_051, 0).saturating_mul(v.into())) + // Standard Error: 51_319 + .saturating_add(Weight::from_parts(4_095_128, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(t.into())) .saturating_add(Weight::from_parts(0, 553).saturating_mul(v.into())) } - /// Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase DesiredTargets (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase Snapshot (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Round (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase Round (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase MinimumUntrustedScore (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::DesiredTargets` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::DesiredTargets` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Snapshot` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Snapshot` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Round` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::MinimumUntrustedScore` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::MinimumUntrustedScore` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `v` is `[1000, 2000]`. /// The range of component `t` is `[500, 1000]`. /// The range of component `a` is `[500, 800]`. @@ -471,12 +474,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `228 + t * (32 ±0) + v * (553 ±0)` // Estimated: `1713 + t * (32 ±0) + v * (553 ±0)` - // Minimum execution time: 4_724_399_000 picoseconds. - Weight::from_parts(4_886_472_000, 1713) - // Standard Error: 15_220 - .saturating_add(Weight::from_parts(365_569, 0).saturating_mul(v.into())) - // Standard Error: 45_104 - .saturating_add(Weight::from_parts(3_176_675, 0).saturating_mul(a.into())) + // Minimum execution time: 4_426_416_000 picoseconds. 
+ Weight::from_parts(4_466_923_000, 1713) + // Standard Error: 15_415 + .saturating_add(Weight::from_parts(334_047, 0).saturating_mul(v.into())) + // Standard Error: 45_682 + .saturating_add(Weight::from_parts(3_097_318, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(t.into())) .saturating_add(Weight::from_parts(0, 553).saturating_mul(v.into())) diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index 882b894bb22fcdc38ab26c21301f34344b1c1093..7fade251e6d1c62b1b64a7ec1a86e29e0e18f40d 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -62,7 +62,7 @@ pub const INIT_TIMESTAMP: BlockNumber = 30_000; pub const BLOCK_TIME: BlockNumber = 1000; type Block = frame_system::mocking::MockBlockU32; -type Extrinsic = testing::TestXt; +type Extrinsic = sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Runtime { @@ -699,7 +699,7 @@ pub fn roll_to_with_ocw(n: BlockNumber, pool: Arc>, delay_solu for encoded in &pool.read().transactions { let extrinsic = Extrinsic::decode(&mut &encoded[..]).unwrap(); - let _ = match extrinsic.call { + let _ = match extrinsic.function { RuntimeCall::ElectionProviderMultiPhase( call @ Call::submit_unsigned { .. }, ) => { diff --git a/substrate/frame/elections-phragmen/src/lib.rs b/substrate/frame/elections-phragmen/src/lib.rs index a078361a5f713b5d267fdc1002ac361f95a3faef..e08412a6e87f49a9d66377ab07e57b5792a9ee6b 100644 --- a/substrate/frame/elections-phragmen/src/lib.rs +++ b/substrate/frame/elections-phragmen/src/lib.rs @@ -188,7 +188,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); #[pallet::pallet] @@ -1422,7 +1422,7 @@ mod tests { pub type Block = sp_runtime::generic::Block; pub type UncheckedExtrinsic = - sp_runtime::generic::UncheckedExtrinsic; + sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Test diff --git a/substrate/frame/elections-phragmen/src/weights.rs b/substrate/frame/elections-phragmen/src/weights.rs index b7ed13dae9f734e524072bcd6ac5e116fca632bc..cd67918e85b2f60cb30ac06a38d097b430c6812e 100644 --- a/substrate/frame/elections-phragmen/src/weights.rs +++ b/substrate/frame/elections-phragmen/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_elections_phragmen +//! Autogenerated weights for `pallet_elections_phragmen` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/elections-phragmen/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/elections-phragmen/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_elections_phragmen. +/// Weight functions needed for `pallet_elections_phragmen`. pub trait WeightInfo { fn vote_equal(v: u32, ) -> Weight; fn vote_more(v: u32, ) -> Weight; @@ -66,165 +65,165 @@ pub trait WeightInfo { fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight; } -/// Weights for pallet_elections_phragmen using the Substrate node and recommended hardware. +/// Weights for `pallet_elections_phragmen` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Elections Candidates (r:1 w:0) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:0) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Voting (r:1 w:1) - /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Elections::Candidates` (r:1 w:0) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:0) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Voting` (r:1 w:1) + /// Proof: `Elections::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `v` is `[1, 16]`. fn vote_equal(v: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `403 + v * (80 ±0)` // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 33_028_000 picoseconds. 
- Weight::from_parts(34_073_914, 4764) - // Standard Error: 3_474 - .saturating_add(Weight::from_parts(205_252, 0).saturating_mul(v.into())) + // Minimum execution time: 29_390_000 picoseconds. + Weight::from_parts(30_525_476, 4764) + // Standard Error: 3_185 + .saturating_add(Weight::from_parts(143_073, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) } - /// Storage: Elections Candidates (r:1 w:0) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:0) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Voting (r:1 w:1) - /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Elections::Candidates` (r:1 w:0) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:0) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Voting` (r:1 w:1) + /// Proof: `Elections::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `v` is `[2, 16]`. fn vote_more(v: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `371 + v * (80 ±0)` // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 45_725_000 picoseconds. - Weight::from_parts(47_169_586, 4764) - // Standard Error: 5_148 - .saturating_add(Weight::from_parts(213_742, 0).saturating_mul(v.into())) + // Minimum execution time: 39_765_000 picoseconds. 
+ Weight::from_parts(41_374_102, 4764) + // Standard Error: 4_310 + .saturating_add(Weight::from_parts(153_015, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) } - /// Storage: Elections Candidates (r:1 w:0) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:0) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Voting (r:1 w:1) - /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Elections::Candidates` (r:1 w:0) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:0) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Voting` (r:1 w:1) + /// Proof: `Elections::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `v` is `[2, 16]`. fn vote_less(v: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `403 + v * (80 ±0)` // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 45_519_000 picoseconds. - Weight::from_parts(47_339_108, 4764) - // Standard Error: 5_501 - .saturating_add(Weight::from_parts(195_247, 0).saturating_mul(v.into())) + // Minimum execution time: 39_647_000 picoseconds. 
+ Weight::from_parts(41_474_523, 4764) + // Standard Error: 5_503 + .saturating_add(Weight::from_parts(149_029, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) } - /// Storage: Elections Voting (r:1 w:1) - /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Elections::Voting` (r:1 w:1) + /// Proof: `Elections::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) fn remove_voter() -> Weight { // Proof Size summary in bytes: // Measured: `925` // Estimated: `4764` - // Minimum execution time: 50_386_000 picoseconds. - Weight::from_parts(51_378_000, 4764) + // Minimum execution time: 41_882_000 picoseconds. + Weight::from_parts(42_794_000, 4764) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Elections Candidates (r:1 w:1) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:0) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Elections::Candidates` (r:1 w:1) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:0) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `c` is `[1, 64]`. fn submit_candidacy(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `1570 + c * (48 ±0)` // Estimated: `3055 + c * (48 ±0)` - // Minimum execution time: 38_987_000 picoseconds. - Weight::from_parts(41_302_276, 3055) - // Standard Error: 2_047 - .saturating_add(Weight::from_parts(125_200, 0).saturating_mul(c.into())) + // Minimum execution time: 33_719_000 picoseconds. + Weight::from_parts(35_017_073, 3055) + // Standard Error: 1_587 + .saturating_add(Weight::from_parts(121_130, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) } - /// Storage: Elections Candidates (r:1 w:1) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Elections::Candidates` (r:1 w:1) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `c` is `[1, 64]`. 
fn renounce_candidacy_candidate(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `285 + c * (48 ±0)` // Estimated: `1770 + c * (48 ±0)` - // Minimum execution time: 33_510_000 picoseconds. - Weight::from_parts(34_947_760, 1770) - // Standard Error: 1_781 - .saturating_add(Weight::from_parts(78_851, 0).saturating_mul(c.into())) + // Minimum execution time: 27_263_000 picoseconds. + Weight::from_parts(28_215_666, 1770) + // Standard Error: 1_196 + .saturating_add(Weight::from_parts(86_804, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) } - /// Storage: Elections Members (r:1 w:1) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:1) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:1) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:0) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Members (r:0 w:1) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Elections::Members` (r:1 w:1) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:1) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Prime` (r:1 w:1) + /// Proof: `Council::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:0) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:0 w:1) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn renounce_candidacy_members() -> Weight { // Proof Size summary in bytes: - // Measured: `1900` - // Estimated: `3385` - // Minimum execution time: 50_603_000 picoseconds. - Weight::from_parts(51_715_000, 3385) + // Measured: `1933` + // Estimated: `3418` + // Minimum execution time: 41_531_000 picoseconds. + Weight::from_parts(42_937_000, 3418) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Elections RunnersUp (r:1 w:1) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Elections::RunnersUp` (r:1 w:1) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn renounce_candidacy_runners_up() -> Weight { // Proof Size summary in bytes: // Measured: `880` // Estimated: `2365` - // Minimum execution time: 33_441_000 picoseconds. - Weight::from_parts(34_812_000, 2365) + // Minimum execution time: 27_680_000 picoseconds. 
+ Weight::from_parts(28_810_000, 2365) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Benchmark Override (r:0 w:0) - /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn remove_member_without_replacement() -> Weight { // Proof Size summary in bytes: // Measured: `0` @@ -232,87 +231,90 @@ impl WeightInfo for SubstrateWeight { // Minimum execution time: 2_000_000_000_000 picoseconds. Weight::from_parts(2_000_000_000_000, 0) } - /// Storage: Elections Members (r:1 w:1) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Elections RunnersUp (r:1 w:1) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:1) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:0) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Members (r:0 w:1) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Elections::Members` (r:1 w:1) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Elections::RunnersUp` (r:1 w:1) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Prime` (r:1 w:1) + /// Proof: `Council::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:0) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:0 w:1) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn remove_member_with_replacement() -> Weight { // Proof Size summary in bytes: - // Measured: `1900` + // Measured: `1933` // Estimated: `3593` - // Minimum execution time: 57_289_000 picoseconds. - Weight::from_parts(58_328_000, 3593) + // Minimum execution time: 45_333_000 picoseconds. 
+ Weight::from_parts(46_523_000, 3593) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Elections Voting (r:513 w:512) - /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:0) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Candidates (r:1 w:0) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Balances Locks (r:512 w:512) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:512 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: System Account (r:512 w:512) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Elections::Voting` (r:257 w:256) + /// Proof: `Elections::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:0) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Candidates` (r:1 w:0) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Locks` (r:256 w:256) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:256 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:256 w:256) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `v` is `[256, 512]`. /// The range of component `d` is `[0, 256]`. - fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { + fn clean_defunct_voters(v: u32, d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1149 + v * (811 ±0)` - // Estimated: `4621 + v * (3774 ±0)` - // Minimum execution time: 18_774_231_000 picoseconds. - Weight::from_parts(18_933_040_000, 4621) - // Standard Error: 301_534 - .saturating_add(Weight::from_parts(44_306_903, 0).saturating_mul(v.into())) + // Measured: `0 + d * (818 ±0) + v * (57 ±0)` + // Estimated: `24906 + d * (3774 ±1) + v * (24 ±0)` + // Minimum execution time: 5_620_000 picoseconds. 
+ Weight::from_parts(5_817_000, 24906) + // Standard Error: 18_357 + .saturating_add(Weight::from_parts(106_164, 0).saturating_mul(v.into())) + // Standard Error: 39_980 + .saturating_add(Weight::from_parts(52_456_337, 0).saturating_mul(d.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(v.into()))) - .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(v.into()))) - .saturating_add(Weight::from_parts(0, 3774).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(d.into()))) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(d.into()))) + .saturating_add(Weight::from_parts(0, 3774).saturating_mul(d.into())) + .saturating_add(Weight::from_parts(0, 24).saturating_mul(v.into())) } - /// Storage: Elections Candidates (r:1 w:1) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:1) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:1) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Voting (r:513 w:0) - /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:0) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System Account (r:44 w:44) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Elections ElectionRounds (r:1 w:1) - /// Proof Skipped: Elections ElectionRounds (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Members (r:0 w:1) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:0 w:1) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Elections::Candidates` (r:1 w:1) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:1) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:1) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Voting` (r:513 w:0) + /// Proof: `Elections::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:0) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:44 w:44) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Elections::ElectionRounds` (r:1 w:1) + /// Proof: `Elections::ElectionRounds` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:0 w:1) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Prime` (r:0 w:1) + /// Proof: `Council::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `c` is `[1, 64]`. /// The range of component `v` is `[1, 512]`. /// The range of component `e` is `[512, 8192]`. 
fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + e * (28 ±0) + v * (606 ±0)` - // Estimated: `178887 + c * (2135 ±7) + e * (12 ±0) + v * (2653 ±6)` - // Minimum execution time: 1_281_877_000 picoseconds. - Weight::from_parts(1_288_147_000, 178887) - // Standard Error: 528_851 - .saturating_add(Weight::from_parts(17_761_407, 0).saturating_mul(v.into())) - // Standard Error: 33_932 - .saturating_add(Weight::from_parts(698_277, 0).saturating_mul(e.into())) + // Estimated: `178920 + c * (2135 ±7) + e * (12 ±0) + v * (2653 ±6)` + // Minimum execution time: 1_082_582_000 picoseconds. + Weight::from_parts(1_084_730_000, 178920) + // Standard Error: 594_096 + .saturating_add(Weight::from_parts(19_096_288, 0).saturating_mul(v.into())) + // Standard Error: 38_118 + .saturating_add(Weight::from_parts(792_945, 0).saturating_mul(e.into())) .saturating_add(T::DbWeight::get().reads(21_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) @@ -324,164 +326,164 @@ impl WeightInfo for SubstrateWeight { } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Elections Candidates (r:1 w:0) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:0) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Voting (r:1 w:1) - /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Elections::Candidates` (r:1 w:0) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:0) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Voting` (r:1 w:1) + /// Proof: `Elections::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `v` is `[1, 16]`. fn vote_equal(v: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `403 + v * (80 ±0)` // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 33_028_000 picoseconds. - Weight::from_parts(34_073_914, 4764) - // Standard Error: 3_474 - .saturating_add(Weight::from_parts(205_252, 0).saturating_mul(v.into())) + // Minimum execution time: 29_390_000 picoseconds. 
+ Weight::from_parts(30_525_476, 4764) + // Standard Error: 3_185 + .saturating_add(Weight::from_parts(143_073, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) } - /// Storage: Elections Candidates (r:1 w:0) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:0) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Voting (r:1 w:1) - /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Elections::Candidates` (r:1 w:0) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:0) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Voting` (r:1 w:1) + /// Proof: `Elections::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `v` is `[2, 16]`. fn vote_more(v: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `371 + v * (80 ±0)` // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 45_725_000 picoseconds. - Weight::from_parts(47_169_586, 4764) - // Standard Error: 5_148 - .saturating_add(Weight::from_parts(213_742, 0).saturating_mul(v.into())) + // Minimum execution time: 39_765_000 picoseconds. 
+ Weight::from_parts(41_374_102, 4764) + // Standard Error: 4_310 + .saturating_add(Weight::from_parts(153_015, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) } - /// Storage: Elections Candidates (r:1 w:0) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:0) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Voting (r:1 w:1) - /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Elections::Candidates` (r:1 w:0) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:0) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Voting` (r:1 w:1) + /// Proof: `Elections::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `v` is `[2, 16]`. fn vote_less(v: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `403 + v * (80 ±0)` // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 45_519_000 picoseconds. - Weight::from_parts(47_339_108, 4764) - // Standard Error: 5_501 - .saturating_add(Weight::from_parts(195_247, 0).saturating_mul(v.into())) + // Minimum execution time: 39_647_000 picoseconds. 
+ Weight::from_parts(41_474_523, 4764) + // Standard Error: 5_503 + .saturating_add(Weight::from_parts(149_029, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) } - /// Storage: Elections Voting (r:1 w:1) - /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Elections::Voting` (r:1 w:1) + /// Proof: `Elections::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) fn remove_voter() -> Weight { // Proof Size summary in bytes: // Measured: `925` // Estimated: `4764` - // Minimum execution time: 50_386_000 picoseconds. - Weight::from_parts(51_378_000, 4764) + // Minimum execution time: 41_882_000 picoseconds. + Weight::from_parts(42_794_000, 4764) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Elections Candidates (r:1 w:1) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:0) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Elections::Candidates` (r:1 w:1) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:0) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `c` is `[1, 64]`. fn submit_candidacy(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `1570 + c * (48 ±0)` // Estimated: `3055 + c * (48 ±0)` - // Minimum execution time: 38_987_000 picoseconds. - Weight::from_parts(41_302_276, 3055) - // Standard Error: 2_047 - .saturating_add(Weight::from_parts(125_200, 0).saturating_mul(c.into())) + // Minimum execution time: 33_719_000 picoseconds. + Weight::from_parts(35_017_073, 3055) + // Standard Error: 1_587 + .saturating_add(Weight::from_parts(121_130, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) } - /// Storage: Elections Candidates (r:1 w:1) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Elections::Candidates` (r:1 w:1) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `c` is `[1, 64]`. 
fn renounce_candidacy_candidate(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `285 + c * (48 ±0)` // Estimated: `1770 + c * (48 ±0)` - // Minimum execution time: 33_510_000 picoseconds. - Weight::from_parts(34_947_760, 1770) - // Standard Error: 1_781 - .saturating_add(Weight::from_parts(78_851, 0).saturating_mul(c.into())) + // Minimum execution time: 27_263_000 picoseconds. + Weight::from_parts(28_215_666, 1770) + // Standard Error: 1_196 + .saturating_add(Weight::from_parts(86_804, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) } - /// Storage: Elections Members (r:1 w:1) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:1) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:1) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:0) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Members (r:0 w:1) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Elections::Members` (r:1 w:1) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:1) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Prime` (r:1 w:1) + /// Proof: `Council::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:0) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:0 w:1) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn renounce_candidacy_members() -> Weight { // Proof Size summary in bytes: - // Measured: `1900` - // Estimated: `3385` - // Minimum execution time: 50_603_000 picoseconds. - Weight::from_parts(51_715_000, 3385) + // Measured: `1933` + // Estimated: `3418` + // Minimum execution time: 41_531_000 picoseconds. + Weight::from_parts(42_937_000, 3418) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Elections RunnersUp (r:1 w:1) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Elections::RunnersUp` (r:1 w:1) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn renounce_candidacy_runners_up() -> Weight { // Proof Size summary in bytes: // Measured: `880` // Estimated: `2365` - // Minimum execution time: 33_441_000 picoseconds. - Weight::from_parts(34_812_000, 2365) + // Minimum execution time: 27_680_000 picoseconds. 
+ Weight::from_parts(28_810_000, 2365) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Benchmark Override (r:0 w:0) - /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn remove_member_without_replacement() -> Weight { // Proof Size summary in bytes: // Measured: `0` @@ -489,87 +491,90 @@ impl WeightInfo for () { // Minimum execution time: 2_000_000_000_000 picoseconds. Weight::from_parts(2_000_000_000_000, 0) } - /// Storage: Elections Members (r:1 w:1) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Elections RunnersUp (r:1 w:1) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:1) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:0) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Members (r:0 w:1) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Elections::Members` (r:1 w:1) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Elections::RunnersUp` (r:1 w:1) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Prime` (r:1 w:1) + /// Proof: `Council::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:0) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:0 w:1) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn remove_member_with_replacement() -> Weight { // Proof Size summary in bytes: - // Measured: `1900` + // Measured: `1933` // Estimated: `3593` - // Minimum execution time: 57_289_000 picoseconds. - Weight::from_parts(58_328_000, 3593) + // Minimum execution time: 45_333_000 picoseconds. 
+ Weight::from_parts(46_523_000, 3593) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Elections Voting (r:513 w:512) - /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:0) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Candidates (r:1 w:0) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Balances Locks (r:512 w:512) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:512 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: System Account (r:512 w:512) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Elections::Voting` (r:257 w:256) + /// Proof: `Elections::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:0) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Candidates` (r:1 w:0) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Locks` (r:256 w:256) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:256 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:256 w:256) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `v` is `[256, 512]`. /// The range of component `d` is `[0, 256]`. - fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { + fn clean_defunct_voters(v: u32, d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1149 + v * (811 ±0)` - // Estimated: `4621 + v * (3774 ±0)` - // Minimum execution time: 18_774_231_000 picoseconds. - Weight::from_parts(18_933_040_000, 4621) - // Standard Error: 301_534 - .saturating_add(Weight::from_parts(44_306_903, 0).saturating_mul(v.into())) + // Measured: `0 + d * (818 ±0) + v * (57 ±0)` + // Estimated: `24906 + d * (3774 ±1) + v * (24 ±0)` + // Minimum execution time: 5_620_000 picoseconds. 
+ Weight::from_parts(5_817_000, 24906) + // Standard Error: 18_357 + .saturating_add(Weight::from_parts(106_164, 0).saturating_mul(v.into())) + // Standard Error: 39_980 + .saturating_add(Weight::from_parts(52_456_337, 0).saturating_mul(d.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(v.into()))) - .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(v.into()))) - .saturating_add(Weight::from_parts(0, 3774).saturating_mul(v.into())) + .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(d.into()))) + .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(d.into()))) + .saturating_add(Weight::from_parts(0, 3774).saturating_mul(d.into())) + .saturating_add(Weight::from_parts(0, 24).saturating_mul(v.into())) } - /// Storage: Elections Candidates (r:1 w:1) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:1) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:1) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Voting (r:513 w:0) - /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:0) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System Account (r:44 w:44) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Elections ElectionRounds (r:1 w:1) - /// Proof Skipped: Elections ElectionRounds (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Members (r:0 w:1) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:0 w:1) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Elections::Candidates` (r:1 w:1) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:1) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:1) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Voting` (r:513 w:0) + /// Proof: `Elections::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:0) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:44 w:44) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Elections::ElectionRounds` (r:1 w:1) + /// Proof: `Elections::ElectionRounds` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:0 w:1) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Prime` (r:0 w:1) + /// Proof: `Council::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `c` is `[1, 64]`. /// The range of component `v` is `[1, 512]`. /// The range of component `e` is `[512, 8192]`. 
fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + e * (28 ±0) + v * (606 ±0)` - // Estimated: `178887 + c * (2135 ±7) + e * (12 ±0) + v * (2653 ±6)` - // Minimum execution time: 1_281_877_000 picoseconds. - Weight::from_parts(1_288_147_000, 178887) - // Standard Error: 528_851 - .saturating_add(Weight::from_parts(17_761_407, 0).saturating_mul(v.into())) - // Standard Error: 33_932 - .saturating_add(Weight::from_parts(698_277, 0).saturating_mul(e.into())) + // Estimated: `178920 + c * (2135 ±7) + e * (12 ±0) + v * (2653 ±6)` + // Minimum execution time: 1_082_582_000 picoseconds. + Weight::from_parts(1_084_730_000, 178920) + // Standard Error: 594_096 + .saturating_add(Weight::from_parts(19_096_288, 0).saturating_mul(v.into())) + // Standard Error: 38_118 + .saturating_add(Weight::from_parts(792_945, 0).saturating_mul(e.into())) .saturating_add(RocksDbWeight::get().reads(21_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(c.into()))) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) diff --git a/substrate/frame/examples/Cargo.toml b/substrate/frame/examples/Cargo.toml index eb6355edd312a1ff5f865c235ffd034f085a3027..45c7440eb89135eca98cf00a4aaf5d6ad2c094ca 100644 --- a/substrate/frame/examples/Cargo.toml +++ b/substrate/frame/examples/Cargo.toml @@ -23,6 +23,7 @@ pallet-example-frame-crate = { path = "frame-crate", default-features = false } pallet-example-kitchensink = { path = "kitchensink", default-features = false } pallet-example-offchain-worker = { path = "offchain-worker", default-features = false } pallet-example-split = { path = "split", default-features = false } +pallet-example-single-block-migrations = { path = "single-block-migrations", default-features = false } pallet-example-tasks = { path = "tasks", default-features = false } [features] @@ -34,6 +35,7 @@ std = [ "pallet-example-frame-crate/std", "pallet-example-kitchensink/std", "pallet-example-offchain-worker/std", + "pallet-example-single-block-migrations/std", "pallet-example-split/std", "pallet-example-tasks/std", ] @@ -43,6 +45,7 @@ try-runtime = [ "pallet-example-basic/try-runtime", "pallet-example-kitchensink/try-runtime", "pallet-example-offchain-worker/try-runtime", + "pallet-example-single-block-migrations/try-runtime", "pallet-example-split/try-runtime", "pallet-example-tasks/try-runtime", ] diff --git a/substrate/frame/examples/basic/src/lib.rs b/substrate/frame/examples/basic/src/lib.rs index 12cadc969fd74288fc2ca99ac014fa98a603d56d..94b2f276e00b7482febf9e69fe6af15b39e1b09a 100644 --- a/substrate/frame/examples/basic/src/lib.rs +++ b/substrate/frame/examples/basic/src/lib.rs @@ -46,9 +46,10 @@ //! use the [`Config::WeightInfo`] trait to calculate call weights. This can also be overridden, //! as demonstrated by [`Call::set_dummy`]. //! - A private function that performs a storage update. -//! - A simple signed extension implementation (see: [`sp_runtime::traits::SignedExtension`]) which -//! increases the priority of the [`Call::set_dummy`] if it's present and drops any transaction -//! with an encoded length higher than 200 bytes. +//! - A simple transaction extension implementation (see: +//! [`sp_runtime::traits::TransactionExtension`]) which increases the priority of the +//! [`Call::set_dummy`] if it's present and drops any transaction with an encoded length higher +//! than 200 bytes. // Ensure we're `no_std` when compiling for Wasm. 
#![cfg_attr(not(feature = "std"), no_std)] @@ -64,10 +65,12 @@ use frame_system::ensure_signed; use log::info; use scale_info::TypeInfo; use sp_runtime::{ - traits::{Bounded, DispatchInfoOf, SaturatedConversion, Saturating, SignedExtension}, - transaction_validity::{ - InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, + impl_tx_ext_default, + traits::{ + Bounded, DispatchInfoOf, OriginOf, SaturatedConversion, Saturating, TransactionExtension, + TransactionExtensionBase, ValidateResult, }, + transaction_validity::{InvalidTransaction, ValidTransaction}, }; use sp_std::vec::Vec; @@ -440,8 +443,8 @@ impl Pallet { // Similar to other FRAME pallets, your pallet can also define a signed extension and perform some // checks and [pre/post]processing [before/after] the transaction. A signed extension can be any -// decodable type that implements `SignedExtension`. See the trait definition for the full list of -// bounds. As a convention, you can follow this approach to create an extension for your pallet: +// decodable type that implements `TransactionExtension`. See the trait definition for the full list +// of bounds. As a convention, you can follow this approach to create an extension for your pallet: // - If the extension does not carry any data, then use a tuple struct with just a `marker` // (needed for the compiler to accept `T: Config`) will suffice. // - Otherwise, create a tuple struct which contains the external data. Of course, for the entire @@ -455,18 +458,18 @@ impl Pallet { // // Using the extension, you can add some hooks to the life cycle of each transaction. Note that by // default, an extension is applied to all `Call` functions (i.e. all transactions). the `Call` enum -// variant is given to each function of `SignedExtension`. Hence, you can filter based on pallet or -// a particular call if needed. +// variant is given to each function of `TransactionExtension`. Hence, you can filter based on +// pallet or a particular call if needed. // // Some extra information, such as encoded length, some static dispatch info like weight and the // sender of the transaction (if signed) are also provided. // // The full list of hooks that can be added to a signed extension can be found -// [here](https://paritytech.github.io/polkadot-sdk/master/sp_runtime/traits/trait.SignedExtension.html). +// [here](https://paritytech.github.io/polkadot-sdk/master/sp_runtime/traits/trait.TransactionExtension.html). // // The signed extensions are aggregated in the runtime file of a substrate chain. All extensions // should be aggregated in a tuple and passed to the `CheckedExtrinsic` and `UncheckedExtrinsic` -// types defined in the runtime. Lookup `pub type SignedExtra = (...)` in `node/runtime` and +// types defined in the runtime. Lookup `pub type TxExtension = (...)` in `node/runtime` and // `node-template` for an example of this. /// A simple signed extension that checks for the `set_dummy` call. 
In that case, it increases the @@ -484,52 +487,45 @@ impl<T: Config + Send + Sync> core::fmt::Debug for WatchDummy<T> { } } -impl<T: Config + Send + Sync> SignedExtension for WatchDummy<T> +impl<T: Config + Send + Sync> TransactionExtensionBase for WatchDummy<T> { + const IDENTIFIER: &'static str = "WatchDummy"; + type Implicit = (); +} +impl<T: Config + Send + Sync, Context> + TransactionExtension<<T as frame_system::Config>::RuntimeCall, Context> for WatchDummy<T> where <T as frame_system::Config>::RuntimeCall: IsSubType<Call<T>>, { - const IDENTIFIER: &'static str = "WatchDummy"; - type AccountId = T::AccountId; - type Call = <T as frame_system::Config>::RuntimeCall; - type AdditionalSigned = (); type Pre = (); - - fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { - Ok(()) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf<Self::Call>, - len: usize, - ) -> Result<Self::Pre, TransactionValidityError> { - self.validate(who, call, info, len).map(|_| ()) - } + type Val = (); fn validate( &self, - _who: &Self::AccountId, - call: &Self::Call, - _info: &DispatchInfoOf<Self::Call>, + origin: OriginOf<<T as frame_system::Config>::RuntimeCall>, + call: &<T as frame_system::Config>::RuntimeCall, + _info: &DispatchInfoOf<<T as frame_system::Config>::RuntimeCall>, len: usize, - ) -> TransactionValidity { + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> ValidateResult<Self::Val, <T as frame_system::Config>::RuntimeCall> { // if the transaction is too big, just drop it. if len > 200 { - return InvalidTransaction::ExhaustsResources.into() + return Err(InvalidTransaction::ExhaustsResources.into()) } // check for `set_dummy` - match call.is_sub_type() { + let validity = match call.is_sub_type() { Some(Call::set_dummy { .. }) => { sp_runtime::print("set_dummy was received."); let valid_tx = ValidTransaction { priority: Bounded::max_value(), ..Default::default() }; - Ok(valid_tx) + valid_tx }, - _ => Ok(Default::default()), - } + _ => Default::default(), + }; + Ok((validity, (), origin)) } + impl_tx_ext_default!(<T as frame_system::Config>::RuntimeCall; Context; prepare); } diff --git a/substrate/frame/examples/basic/src/tests.rs b/substrate/frame/examples/basic/src/tests.rs index 207e46e428ddc62359621bd11eb226e54ab3f92e..e460ad0992f064c72aaca7a13ce33db6d96d5dcd 100644 --- a/substrate/frame/examples/basic/src/tests.rs +++ b/substrate/frame/examples/basic/src/tests.rs @@ -27,7 +27,7 @@ use sp_core::H256; // The testing primitives are very useful for avoiding having to work with signatures // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, + traits::{BlakeTwo256, DispatchTransaction, IdentityLookup}, BuildStorage, }; // Reexport crate as its pallet name for construct_runtime. @@ -158,13 +158,16 @@ fn signed_ext_watch_dummy_works() { assert_eq!( WatchDummy::<Test>(PhantomData) - .validate(&1, &call, &info, 150) + .validate_only(Some(1).into(), &call, &info, 150) .unwrap() + .0 .priority, u64::MAX, ); assert_eq!( - WatchDummy::<Test>(PhantomData).validate(&1, &call, &info, 250), + WatchDummy::<Test>(PhantomData) + .validate_only(Some(1).into(), &call, &info, 250) + .unwrap_err(), InvalidTransaction::ExhaustsResources.into(), ); }) diff --git a/substrate/frame/examples/default-config/src/lib.rs b/substrate/frame/examples/default-config/src/lib.rs index cd1653e6c764358165c8306d4810c1d4c9bea3b4..224faf9dfd431a0ce9281c406738b7aa2fddb0f6 100644 --- a/substrate/frame/examples/default-config/src/lib.rs +++ b/substrate/frame/examples/default-config/src/lib.rs @@ -108,7 +108,7 @@ pub mod pallet { } /// A type providing default configurations for this pallet in another environment. Examples - /// could be a parachain, or a solo-chain. + /// could be a parachain, or a solochain.
/// /// Appropriate derive for `frame_system::DefaultConfig` needs to be provided. In this /// example, we simple derive `frame_system::config_preludes::TestDefaultConfig` again. @@ -149,7 +149,7 @@ pub mod tests { } ); - #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { // these items are defined by frame-system as `no_default`, so we must specify them here. type Block = Block; diff --git a/substrate/frame/examples/offchain-worker/src/tests.rs b/substrate/frame/examples/offchain-worker/src/tests.rs index ea37a2da493d69ae6550c8d999fe2eb9dcfefad1..81f2a40840badd286be14b381df1db904aeeb2b1 100644 --- a/substrate/frame/examples/offchain-worker/src/tests.rs +++ b/substrate/frame/examples/offchain-worker/src/tests.rs @@ -30,7 +30,7 @@ use sp_core::{ use sp_keystore::{testing::MemoryKeystore, Keystore, KeystoreExt}; use sp_runtime::{ - testing::TestXt, + generic::UncheckedExtrinsic, traits::{BlakeTwo256, Extrinsic as ExtrinsicT, IdentifyAccount, IdentityLookup, Verify}, RuntimeAppPublic, }; @@ -73,7 +73,7 @@ impl frame_system::Config for Test { type MaxConsumers = ConstU32<16>; } -type Extrinsic = TestXt; +type Extrinsic = UncheckedExtrinsic; type AccountId = <::Signer as IdentifyAccount>::AccountId; impl frame_system::offchain::SigningTypes for Test { @@ -99,7 +99,7 @@ where _account: AccountId, nonce: u64, ) -> Option<(RuntimeCall, ::SignaturePayload)> { - Some((call, (nonce, ()))) + Some((call, (nonce, (), ()))) } } @@ -219,8 +219,8 @@ fn should_submit_signed_transaction_on_chain() { let tx = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx = Extrinsic::decode(&mut &*tx).unwrap(); - assert_eq!(tx.signature.unwrap().0, 0); - assert_eq!(tx.call, RuntimeCall::Example(crate::Call::submit_price { price: 15523 })); + assert!(matches!(tx.preamble, sp_runtime::generic::Preamble::Signed(0, (), ()))); + assert_eq!(tx.function, RuntimeCall::Example(crate::Call::submit_price { price: 15523 })); }); } @@ -259,11 +259,11 @@ fn should_submit_unsigned_transaction_on_chain_for_any_account() { // then let tx = pool_state.write().transactions.pop().unwrap(); let tx = Extrinsic::decode(&mut &*tx).unwrap(); - assert_eq!(tx.signature, None); + assert!(tx.is_inherent()); if let RuntimeCall::Example(crate::Call::submit_price_unsigned_with_signed_payload { price_payload: body, signature, - }) = tx.call + }) = tx.function { assert_eq!(body, price_payload); @@ -313,11 +313,11 @@ fn should_submit_unsigned_transaction_on_chain_for_all_accounts() { // then let tx = pool_state.write().transactions.pop().unwrap(); let tx = Extrinsic::decode(&mut &*tx).unwrap(); - assert_eq!(tx.signature, None); + assert!(tx.is_inherent()); if let RuntimeCall::Example(crate::Call::submit_price_unsigned_with_signed_payload { price_payload: body, signature, - }) = tx.call + }) = tx.function { assert_eq!(body, price_payload); @@ -353,9 +353,9 @@ fn should_submit_raw_unsigned_transaction_on_chain() { let tx = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx = Extrinsic::decode(&mut &*tx).unwrap(); - assert_eq!(tx.signature, None); + assert!(tx.is_inherent()); assert_eq!( - tx.call, + tx.function, RuntimeCall::Example(crate::Call::submit_price_unsigned { block_number: 1, price: 15523 diff --git a/substrate/frame/examples/single-block-migrations/Cargo.toml 
b/substrate/frame/examples/single-block-migrations/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..5059b2433c77139568d99326473cded73a5af9e9 --- /dev/null +++ b/substrate/frame/examples/single-block-migrations/Cargo.toml @@ -0,0 +1,61 @@ +[package] +name = "pallet-example-single-block-migrations" +version = "0.0.1" +authors.workspace = true +edition.workspace = true +license = "MIT-0" +homepage = "https://substrate.io" +repository.workspace = true +description = "FRAME example pallet demonstrating best-practices for writing storage migrations." +publish = false + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +docify = { version = "0.2.3", default-features = false } +log = { version = "0.4.20", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +frame-support = { path = "../../support", default-features = false } +frame-executive = { path = "../../executive", default-features = false } +frame-system = { path = "../../system", default-features = false } +frame-try-runtime = { path = "../../try-runtime", default-features = false, optional = true } +pallet-balances = { path = "../../balances", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-core = { path = "../../../primitives/core", default-features = false } +sp-io = { path = "../../../primitives/io", default-features = false } +sp-version = { path = "../../../primitives/version", default-features = false } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-executive/std", + "frame-support/std", + "frame-system/std", + "frame-try-runtime/std", + "log/std", + "pallet-balances/std", + "scale-info/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "sp-version/std", +] +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = [ + "frame-executive/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + "frame-try-runtime/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/substrate/frame/examples/single-block-migrations/src/lib.rs b/substrate/frame/examples/single-block-migrations/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..86a9e5d6e95bbeaa0230b81153c3f4149f332436 --- /dev/null +++ b/substrate/frame/examples/single-block-migrations/src/lib.rs @@ -0,0 +1,213 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Single Block Migration Example Pallet +//! +//! 
An example pallet demonstrating best-practices for writing single-block migrations in the +//! context of upgrading pallet storage. +//! +//! ## Forewarning +//! +//! Single block migrations **MUST** execute in a single block; therefore, when executed on a +//! parachain, they are only appropriate when guaranteed not to exceed block weight limits. If a +//! parachain submits a block that exceeds the block weight limit, it will **brick the chain**! +//! +//! If weight is a concern or you are not sure which type of migration to use, you should probably +//! use a multi-block migration. +//! +//! TODO: Link above to multi-block migration example. +//! +//! ## Pallet Overview +//! +//! This example pallet contains a single storage item [`Value`](pallet::Value), which may be set by +//! any signed origin by calling the [`set_value`](crate::Call::set_value) extrinsic. +//! +//! For the purposes of this exercise, we imagine that in [`StorageVersion`] V0 of this pallet +//! [`Value`](pallet::Value) is a `u32`, and this is what is currently stored on-chain. +//! +//! ```ignore +//! // (Old) Storage Version V0 representation of `Value` +//! #[pallet::storage] +//! pub type Value<T: Config> = StorageValue<_, u32>; +//! ``` +//! +//! In [`StorageVersion`] V1 of the pallet a new struct [`CurrentAndPreviousValue`] is introduced: +#![doc = docify::embed!("src/lib.rs", CurrentAndPreviousValue)] +//! and [`Value`](pallet::Value) is updated to store this new struct instead of a `u32`: +#![doc = docify::embed!("src/lib.rs", Value)] +//! +//! In [`StorageVersion`] V1 of the pallet, when [`set_value`](crate::Call::set_value) is called, the +//! new value is stored in the `current` field of [`CurrentAndPreviousValue`], and the previous +//! value (if it exists) is stored in the `previous` field. +#![doc = docify::embed!("src/lib.rs", pallet_calls)] +//! +//! ## Why a migration is necessary +//! +//! Without a migration, there will be a discrepancy between the on-chain storage for [`Value`] (in +//! V0 it is a `u32`) and the current storage for [`Value`] (in V1 it was changed to a +//! [`CurrentAndPreviousValue`] struct). +//! +//! The on-chain storage for [`Value`] would be a `u32` but the runtime would try to read it as a +//! [`CurrentAndPreviousValue`]. This would result in unacceptable undefined behavior. +//! +//! ## Adding a migration module +//! +//! Writing a pallet's migrations in a separate module is strongly recommended. +//! +//! Here's how the migration module is defined for this pallet: +//! +//! ```text +//! substrate/frame/examples/single-block-migrations/src/ +//! ├── lib.rs <-- pallet definition +//! ├── Cargo.toml <-- pallet manifest +//! └── migrations/ +//! ├── mod.rs <-- migrations module definition +//! └── v1.rs <-- migration logic for the V0 to V1 transition +//! ``` +//! +//! This structure allows keeping migration logic separate from the pallet logic and +//! easily adding new migrations in the future. +//! +//! ## Writing the Migration +//! +//! All code related to the migration can be found under +//! [`v1.rs`](migrations::v1). +//! +//! See the migration source code for detailed comments. +//! +//! To keep the migration logic organised, it is split across additional modules: +//! +//! ### `mod v0` +//! +//! Here we define a [`storage_alias`](frame_support::storage_alias) for the old v0 [`Value`] +//! format. +//! +//! This allows reading the old v0 value from storage during the migration. +//! +//! ### `mod version_unchecked` +//! +//! Here we define our raw migration logic, +//! `version_unchecked::MigrateV0ToV1`, which implements the [`OnRuntimeUpgrade`] trait. +//! +//! Importantly, it is kept in a private module so that it cannot be accidentally used in a runtime. +//! +//! Private modules cannot be referenced in docs, so please read the code directly. +//! +//! #### Standalone Struct or Pallet Hook? +//! +//! Note that the storage migration logic is attached to a standalone struct implementing +//! [`OnRuntimeUpgrade`], rather than implementing the +//! [`Hooks::on_runtime_upgrade`](frame_support::traits::Hooks::on_runtime_upgrade) hook directly on +//! the pallet. The pallet hook is better suited for special types of logic that need to execute on +//! every runtime upgrade, but not so much for one-off storage migrations. +//! +//! ### `pub mod versioned` +//! +//! Here, `version_unchecked::MigrateV0ToV1` is wrapped in a +//! [`VersionedMigration`] to define +//! [`versioned::MigrateV0ToV1`](crate::migrations::v1::versioned::MigrateV0ToV1), which may be used +//! in runtimes. +//! +//! Using [`VersionedMigration`] ensures that +//! - The migration only runs once when the on-chain storage version is `0` +//! - The on-chain storage version is updated to `1` after the migration executes +//! - Reads and writes from checking and setting the on-chain storage version are accounted for in +//! the final [`Weight`](frame_support::weights::Weight) +//! +//! This is the only public module exported from `v1`.
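+//!
+//! As a rough, illustrative sketch of how a runtime might consume it (the `Runtime`, `Block`,
+//! `AllPalletsWithSystem` and `Executive` names below are assumed runtime items, not something
+//! defined by this pallet):
+//!
+//! ```ignore
+//! /// Single-block migrations to run on the next runtime upgrade, executed in order.
+//! pub type Migrations =
+//!     (pallet_example_single_block_migrations::migrations::v1::versioned::MigrateV0ToV1<Runtime>,);
+//!
+//! /// Passing `Migrations` as the last generic makes `Executive` run them during
+//! /// `on_runtime_upgrade`, i.e. once, in the first block built with the new runtime.
+//! pub type Executive = frame_executive::Executive<
+//!     Runtime,
+//!     Block,
+//!     frame_system::ChainContext<Runtime>,
+//!     Runtime,
+//!     AllPalletsWithSystem,
+//!     Migrations,
+//! >;
+//! ```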
+//! `version_unchecked::MigrateV0ToV1` which implements the [`OnRuntimeUpgrade`] trait.
+//!
+//! Importantly, it is kept in a private module so that it cannot be accidentally used in a runtime.
+//!
+//! Private modules cannot be referenced in docs, so please read the code directly.
+//!
+//! #### Standalone Struct or Pallet Hook?
+//!
+//! Note that the storage migration logic is attached to a standalone struct implementing
+//! [`OnRuntimeUpgrade`], rather than implementing the
+//! [`Hooks::on_runtime_upgrade`](frame_support::traits::Hooks::on_runtime_upgrade) hook directly on
+//! the pallet. The pallet hook is better suited for special types of logic that need to execute on
+//! every runtime upgrade, but not so much for one-off storage migrations.
+//!
+//! ### `pub mod versioned`
+//!
+//! Here, `version_unchecked::MigrateV0ToV1` is wrapped in a
+//! [`VersionedMigration`] to define
+//! [`versioned::MigrateV0ToV1`](crate::migrations::v1::versioned::MigrateV0ToV1), which may be used
+//! in runtimes.
+//!
+//! Using [`VersionedMigration`] ensures that:
+//! - The migration only runs once when the on-chain storage version is `0`
+//! - The on-chain storage version is updated to `1` after the migration executes
+//! - Reads and writes from checking and setting the on-chain storage version are accounted for in
+//!   the final [`Weight`](frame_support::weights::Weight)
+//!
+//! This is the only public module exported from `v1`.
+//!
+//! ### `mod test`
+//!
+//! Here basic unit tests are defined for the migration.
+//!
+//! When writing migration tests, don't forget to check:
+//! - `on_runtime_upgrade` returns the expected weight
+//! - `post_upgrade` succeeds when given the bytes returned by `pre_upgrade`
+//! - Pallet storage is in the expected state after the migration
+//!
+//! [`VersionedMigration`]: frame_support::migrations::VersionedMigration
+//! [`GetStorageVersion`]: frame_support::traits::GetStorageVersion
+//! [`OnRuntimeUpgrade`]: frame_support::traits::OnRuntimeUpgrade
+//! [`MigrateV0ToV1`]: crate::migrations::v1::versioned::MigrateV0ToV1
+
+// We make sure this pallet uses `no_std` for compiling to Wasm.
+#![cfg_attr(not(feature = "std"), no_std)]
+// allow non-camel-case names for storage version V0 value
+#![allow(non_camel_case_types)]
+
+// Re-export pallet items so that they can be accessed from the crate namespace.
+pub use pallet::*;
+
+// Export migrations so they may be used in the runtime.
+pub mod migrations;
+#[doc(hidden)]
+mod mock;
+use codec::{Decode, Encode, MaxEncodedLen};
+use frame_support::traits::StorageVersion;
+use sp_runtime::RuntimeDebug;
+
+/// Example struct holding the most recently set [`u32`] and the
+/// second most recently set [`u32`] (if one existed).
+#[docify::export]
+#[derive(
+    Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, scale_info::TypeInfo, MaxEncodedLen,
+)]
+pub struct CurrentAndPreviousValue {
+    /// The most recently set value.
+    pub current: u32,
+    /// The previous value, if one existed.
+    pub previous: Option<u32>,
+}
+
+// Pallet for demonstrating storage migrations.
+#[frame_support::pallet(dev_mode)]
+pub mod pallet {
+    use super::*;
+    use frame_support::pallet_prelude::*;
+    use frame_system::pallet_prelude::*;
+
+    /// Define the current [`StorageVersion`] of the pallet.
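+    ///
+    /// Note that this is the version the *code* expects; the version actually stored on-chain is
+    /// tracked separately, and it is the on-chain value that
+    /// [`VersionedMigration`](frame_support::migrations::VersionedMigration) checks before running
+    /// a migration. A rough, illustrative sketch of a future bump (the V2 layout and the
+    /// `migrations::v2` module are hypothetical):
+    ///
+    /// ```ignore
+    /// // When a hypothetical V2 storage layout is introduced, the constant is bumped ...
+    /// const STORAGE_VERSION: StorageVersion = StorageVersion::new(2);
+    /// // ... and a new `migrations::v2` module provides the V1 -> V2 migration.
+    /// ```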
+    const STORAGE_VERSION: StorageVersion = StorageVersion::new(1);
+
+    #[pallet::pallet]
+    #[pallet::storage_version(STORAGE_VERSION)]
+    pub struct Pallet<T>(_);
+
+    #[pallet::config]
+    pub trait Config: frame_system::Config {}
+
+    /// [`StorageVersion`] V1 of [`Value`].
+    ///
+    /// Currently used.
+    #[docify::export]
+    #[pallet::storage]
+    pub type Value<T> = StorageValue<_, CurrentAndPreviousValue>;
+
+    #[docify::export(pallet_calls)]
+    #[pallet::call]
+    impl<T: Config> Pallet<T> {
+        pub fn set_value(origin: OriginFor<T>, value: u32) -> DispatchResult {
+            ensure_signed(origin)?;
+
+            let previous = Value::<T>::get().map(|v| v.current);
+            let new_struct = CurrentAndPreviousValue { current: value, previous };
+            <Value<T>>::put(new_struct);
+
+            Ok(())
+        }
+    }
+}
diff --git a/substrate/frame/examples/single-block-migrations/src/migrations/mod.rs b/substrate/frame/examples/single-block-migrations/src/migrations/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..80a33f69941aa1a49563971ab0c9e13a88ecda19
--- /dev/null
+++ b/substrate/frame/examples/single-block-migrations/src/migrations/mod.rs
@@ -0,0 +1,20 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/// Module containing all logic associated with the example migration from
+/// [`StorageVersion`](frame_support::traits::StorageVersion) V0 to V1.
+pub mod v1;
diff --git a/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs b/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs
new file mode 100644
index 0000000000000000000000000000000000000000..b46640a320207551ab55b1dc793574ce68c4c26c
--- /dev/null
+++ b/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs
@@ -0,0 +1,222 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use frame_support::{
+    storage_alias,
+    traits::{Get, OnRuntimeUpgrade},
+};
+
+#[cfg(feature = "try-runtime")]
+use sp_std::vec::Vec;
+
+/// Collection of storage item formats from the previous storage version.
+///
+/// Required so we can read values in the v0 storage format during the migration.
+mod v0 {
+    use super::*;
+
+    /// V0 type for [`crate::Value`].
+    #[storage_alias]
+    pub type Value<T: crate::Config> = StorageValue<crate::Pallet<T>, u32>;
+}
+
+/// Private module containing *version unchecked* migration logic.
+/// +/// Should only be used by the [`VersionedMigration`](frame_support::migrations::VersionedMigration) +/// type in this module to create something to export. +/// +/// The unversioned migration should be kept private so the unversioned migration cannot +/// accidentally be used in any runtimes. +/// +/// For more about this pattern of keeping items private, see +/// - +/// - +mod version_unchecked { + use super::*; + + /// Implements [`OnRuntimeUpgrade`], migrating the state of this pallet from V0 to V1. + /// + /// In V0 of the template [`crate::Value`] is just a `u32`. In V1, it has been upgraded to + /// contain the struct [`crate::CurrentAndPreviousValue`]. + /// + /// In this migration, update the on-chain storage for the pallet to reflect the new storage + /// layout. + pub struct MigrateV0ToV1(sp_std::marker::PhantomData); + + impl OnRuntimeUpgrade for MigrateV0ToV1 { + /// Return the existing [`crate::Value`] so we can check that it was correctly set in + /// `version_unchecked::MigrateV0ToV1::post_upgrade`. + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + use codec::Encode; + + // Access the old value using the `storage_alias` type + let old_value = v0::Value::::get(); + // Return it as an encoded `Vec` + Ok(old_value.encode()) + } + + /// Migrate the storage from V0 to V1. + /// + /// - If the value doesn't exist, there is nothing to do. + /// - If the value exists, it is read and then written back to storage inside a + /// [`crate::CurrentAndPreviousValue`]. + fn on_runtime_upgrade() -> frame_support::weights::Weight { + // Read the old value from storage + if let Some(old_value) = v0::Value::::take() { + // Write the new value to storage + let new = crate::CurrentAndPreviousValue { current: old_value, previous: None }; + crate::Value::::put(new); + // One read for the old value, one write for the new value + T::DbWeight::get().reads_writes(1, 1) + } else { + // One read for trying to access the old value + T::DbWeight::get().reads(1) + } + } + + /// Verifies the storage was migrated correctly. + /// + /// - If there was no old value, the new value should not be set. + /// - If there was an old value, the new value should be a + /// [`crate::CurrentAndPreviousValue`]. + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + use codec::Decode; + use frame_support::ensure; + + let maybe_old_value = Option::::decode(&mut &state[..]).map_err(|_| { + sp_runtime::TryRuntimeError::Other("Failed to decode old value from storage") + })?; + + match maybe_old_value { + Some(old_value) => { + let expected_new_value = + crate::CurrentAndPreviousValue { current: old_value, previous: None }; + let actual_new_value = crate::Value::::get(); + + ensure!(actual_new_value.is_some(), "New value not set"); + ensure!( + actual_new_value == Some(expected_new_value), + "New value not set correctly" + ); + }, + None => { + ensure!(crate::Value::::get().is_none(), "New value unexpectedly set"); + }, + }; + Ok(()) + } + } +} + +/// Public module containing *version checked* migration logic. +/// +/// This is the only module that should be exported from this module. +/// +/// See [`VersionedMigration`](frame_support::migrations::VersionedMigration) docs for more about +/// how it works. 
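+///
+/// For example, a runtime would typically aggregate this migration with others and pass them to
+/// `Executive`. The following is only an illustrative sketch: `Runtime`, `Block`,
+/// `AllPalletsWithSystem` and the `Migrations` alias are placeholders defined by the consuming
+/// runtime, not by this crate.
+///
+/// ```ignore
+/// /// Single-block migrations to run at the next runtime upgrade.
+/// pub type Migrations = (
+///     pallet_example_single_block_migrations::migrations::v1::versioned::MigrateV0ToV1<Runtime>,
+///     // ... further migrations ...
+/// );
+///
+/// pub type Executive = frame_executive::Executive<
+///     Runtime,
+///     Block,
+///     frame_system::ChainContext<Runtime>,
+///     Runtime,
+///     AllPalletsWithSystem,
+///     Migrations,
+/// >;
+/// ```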
+pub mod versioned { + use super::*; + + /// `version_unchecked::MigrateV0ToV1` wrapped in a + /// [`VersionedMigration`](frame_support::migrations::VersionedMigration), which ensures that: + /// - The migration only runs once when the on-chain storage version is 0 + /// - The on-chain storage version is updated to `1` after the migration executes + /// - Reads/Writes from checking/settings the on-chain storage version are accounted for + pub type MigrateV0ToV1 = frame_support::migrations::VersionedMigration< + 0, // The migration will only execute when the on-chain storage version is 0 + 1, // The on-chain storage version will be set to 1 after the migration is complete + version_unchecked::MigrateV0ToV1, + crate::pallet::Pallet, + ::DbWeight, + >; +} + +/// Tests for our migration. +/// +/// When writing migration tests, it is important to check: +/// 1. `on_runtime_upgrade` returns the expected weight +/// 2. `post_upgrade` succeeds when given the bytes returned by `pre_upgrade` +/// 3. The storage is in the expected state after the migration +#[cfg(any(all(feature = "try-runtime", test), doc))] +mod test { + use super::*; + use crate::mock::{new_test_ext, MockRuntime}; + use frame_support::assert_ok; + use version_unchecked::MigrateV0ToV1; + + #[test] + fn handles_no_existing_value() { + new_test_ext().execute_with(|| { + // By default, no value should be set. Verify this assumption. + assert!(crate::Value::::get().is_none()); + assert!(v0::Value::::get().is_none()); + + // Get the pre_upgrade bytes + let bytes = match MigrateV0ToV1::::pre_upgrade() { + Ok(bytes) => bytes, + Err(e) => panic!("pre_upgrade failed: {:?}", e), + }; + + // Execute the migration + let weight = MigrateV0ToV1::::on_runtime_upgrade(); + + // Verify post_upgrade succeeds + assert_ok!(MigrateV0ToV1::::post_upgrade(bytes)); + + // The weight should be just 1 read for trying to access the old value. + assert_eq!(weight, ::DbWeight::get().reads(1)); + + // After the migration, no value should have been set. + assert!(crate::Value::::get().is_none()); + }) + } + + #[test] + fn handles_existing_value() { + new_test_ext().execute_with(|| { + // Set up an initial value + let initial_value = 42; + v0::Value::::put(initial_value); + + // Get the pre_upgrade bytes + let bytes = match MigrateV0ToV1::::pre_upgrade() { + Ok(bytes) => bytes, + Err(e) => panic!("pre_upgrade failed: {:?}", e), + }; + + // Execute the migration + let weight = MigrateV0ToV1::::on_runtime_upgrade(); + + // Verify post_upgrade succeeds + assert_ok!(MigrateV0ToV1::::post_upgrade(bytes)); + + // The weight used should be 1 read for the old value, and 1 write for the new + // value. + assert_eq!( + weight, + ::DbWeight::get().reads_writes(1, 1) + ); + + // After the migration, the new value should be set as the `current` value. + let expected_new_value = + crate::CurrentAndPreviousValue { current: initial_value, previous: None }; + assert_eq!(crate::Value::::get(), Some(expected_new_value)); + }) + } +} diff --git a/substrate/frame/examples/single-block-migrations/src/mock.rs b/substrate/frame/examples/single-block-migrations/src/mock.rs new file mode 100644 index 0000000000000000000000000000000000000000..02f72e01836f2462de2043cae737ea46ec2072ed --- /dev/null +++ b/substrate/frame/examples/single-block-migrations/src/mock.rs @@ -0,0 +1,69 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg(any(all(feature = "try-runtime", test), doc))] + +use crate::*; +use frame_support::{derive_impl, traits::ConstU64, weights::constants::ParityDbWeight}; + +// Re-export crate as its pallet name for construct_runtime. +use crate as pallet_example_storage_migration; + +type Block = frame_system::mocking::MockBlock; + +// For testing the pallet, we construct a mock runtime. +frame_support::construct_runtime!( + pub struct MockRuntime { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Example: pallet_example_storage_migration::{Pallet, Call, Storage}, + } +); + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for MockRuntime { + type Block = Block; + type AccountData = pallet_balances::AccountData; + type DbWeight = ParityDbWeight; +} + +impl pallet_balances::Config for MockRuntime { + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = u64; + type DustRemoval = (); + type RuntimeEvent = RuntimeEvent; + type ExistentialDeposit = ConstU64<1>; + type AccountStore = System; + type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); +} + +impl Config for MockRuntime {} + +pub fn new_test_ext() -> sp_io::TestExternalities { + use sp_runtime::BuildStorage; + + let t = RuntimeGenesisConfig { system: Default::default(), balances: Default::default() } + .build_storage() + .unwrap(); + t.into() +} diff --git a/substrate/frame/examples/src/lib.rs b/substrate/frame/examples/src/lib.rs index f38bbe52dc114900e4b497cb17da6deb4406512c..dee23a41379fc8b732252b9fe4db39bf809699fd 100644 --- a/substrate/frame/examples/src/lib.rs +++ b/substrate/frame/examples/src/lib.rs @@ -43,6 +43,9 @@ //! - [`pallet_example_frame_crate`]: Example pallet showcasing how one can be //! built using only the `frame` umbrella crate. //! +//! - [`pallet_example_single_block_migrations`]: An example pallet demonstrating best-practices for +//! writing storage migrations. +//! //! - [`pallet_example_tasks`]: This pallet demonstrates the use of `Tasks` to execute service work. //! //! **Tip**: Use `cargo doc --package --open` to view each pallet's documentation. diff --git a/substrate/frame/examples/tasks/src/weights.rs b/substrate/frame/examples/tasks/src/weights.rs index 793af6e962201fd9f92e0260ea0e24f5bc39753d..c9ddea6f9a8ab4ed19d12f8db89fa3dff59cca3f 100644 --- a/substrate/frame/examples/tasks/src/weights.rs +++ b/substrate/frame/examples/tasks/src/weights.rs @@ -15,30 +15,31 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for `pallet_example_tasks` +//! Autogenerated weights for `tasks_example` //! -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-02, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `MacBook.local`, CPU: `` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/release/node-template +// ./target/production/substrate-node // benchmark // pallet -// --chain -// dev -// --pallet -// pallet_example_tasks -// --extrinsic -// * -// --steps -// 20 -// --repeat -// 10 -// --output -// frame/examples/tasks/src/weights.rs +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=tasks_example +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./substrate/frame/examples/tasks/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -48,37 +49,42 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_template. +/// Weight functions needed for `tasks_example`. pub trait WeightInfo { fn add_number_into_total() -> Weight; } -/// Weight functions for `pallet_example_kitchensink`. +/// Weights for `tasks_example` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Kitchensink OtherFoo (r:0 w:1) - /// Proof Skipped: Kitchensink OtherFoo (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TasksExample::Numbers` (r:1 w:1) + /// Proof: `TasksExample::Numbers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `TasksExample::Total` (r:1 w:1) + /// Proof: `TasksExample::Total` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn add_number_into_total() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_000_000 picoseconds. - Weight::from_parts(1_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) + // Measured: `149` + // Estimated: `3614` + // Minimum execution time: 5_776_000 picoseconds. + Weight::from_parts(6_178_000, 3614) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } } +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Kitchensink OtherFoo (r:0 w:1) - /// Proof Skipped: Kitchensink OtherFoo (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TasksExample::Numbers` (r:1 w:1) + /// Proof: `TasksExample::Numbers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `TasksExample::Total` (r:1 w:1) + /// Proof: `TasksExample::Total` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn add_number_into_total() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_000_000 picoseconds. 
- Weight::from_parts(1_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(RocksDbWeight::get().writes(1)) + // Measured: `149` + // Estimated: `3614` + // Minimum execution time: 5_776_000 picoseconds. + Weight::from_parts(6_178_000, 3614) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } } diff --git a/substrate/frame/executive/Cargo.toml b/substrate/frame/executive/Cargo.toml index a4ca265f6178218791bf83f6b8f259821e086a98..63285e4cb4939c10db3342b5d59eeffc63103d48 100644 --- a/substrate/frame/executive/Cargo.toml +++ b/substrate/frame/executive/Cargo.toml @@ -16,6 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +aquamarine = "0.3.2" codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } @@ -44,6 +45,7 @@ default = ["std"] with-tracing = ["sp-tracing/with-tracing"] std = [ "codec/std", + "frame-support/experimental", "frame-support/std", "frame-system/std", "frame-try-runtime/std", diff --git a/substrate/frame/executive/src/lib.rs b/substrate/frame/executive/src/lib.rs index 48ff675f8082ddd0e7779c946b32dde66b171078..3028eaf318e0881c1b6f4d4ea6ac17adfe99a75f 100644 --- a/substrate/frame/executive/src/lib.rs +++ b/substrate/frame/executive/src/lib.rs @@ -15,6 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +#![cfg_attr(not(feature = "std"), no_std)] + //! # Executive Module //! //! The Executive module acts as the orchestration layer for the runtime. It dispatches incoming @@ -35,6 +37,8 @@ //! - Finalize a block. //! - Start an off-chain worker. //! +//! The flow of their application in a block is explained in the [block flowchart](block_flowchart). +//! //! ### Implementations //! //! The Executive module provides the following implementations: @@ -114,17 +118,51 @@ //! pub type Executive = executive::Executive; //! ``` -#![cfg_attr(not(feature = "std"), no_std)] +#[cfg(doc)] +#[cfg_attr(doc, aquamarine::aquamarine)] +/// # Block Execution +/// +/// These are the steps of block execution as done by [`Executive::execute_block`]. A block is +/// invalid if any of them fail. 
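+///
+/// Block execution is typically entered from the runtime's `Core` runtime API. A minimal sketch
+/// (the `Executive` and `Runtime` names are whatever aliases the concrete runtime defines):
+///
+/// ```ignore
+/// impl sp_api::Core<Block> for Runtime {
+///     fn execute_block(block: Block) {
+///         Executive::execute_block(block);
+///     }
+///     // ...
+/// }
+/// ```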
+/// +/// ```mermaid +/// flowchart TD +/// Executive::execute_block --> on_runtime_upgrade +/// on_runtime_upgrade --> System::initialize +/// Executive::initialize_block --> System::initialize +/// System::initialize --> on_initialize +/// on_initialize --> PreInherents[System::PreInherents] +/// PreInherents --> Inherents[Apply Inherents] +/// Inherents --> PostInherents[System::PostInherents] +/// PostInherents --> Check{MBM ongoing?} +/// Check -->|No| poll +/// Check -->|Yes| post_transactions_2[System::PostTransaction] +/// post_transactions_2 --> Step[MBMs::step] +/// Step --> on_finalize +/// poll --> transactions[Apply Transactions] +/// transactions --> post_transactions_1[System::PostTransaction] +/// post_transactions_1 --> CheckIdle{Weight remaining?} +/// CheckIdle -->|Yes| on_idle +/// CheckIdle -->|No| on_finalize +/// on_idle --> on_finalize +/// ``` +pub mod block_flowchart {} + +#[cfg(test)] +mod tests; use codec::{Codec, Encode}; use frame_support::{ + defensive_assert, dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, PostDispatchInfo}, + migrations::MultiStepMigrator, pallet_prelude::InvalidTransaction, traits::{ BeforeAllRuntimeMigrations, EnsureInherentsAreFirst, ExecuteBlock, OffchainWorker, - OnFinalize, OnIdle, OnInitialize, OnRuntimeUpgrade, + OnFinalize, OnIdle, OnInitialize, OnPoll, OnRuntimeUpgrade, PostInherents, + PostTransactions, PreInherents, }, - weights::Weight, + weights::{Weight, WeightMeter}, }; use frame_system::pallet_prelude::BlockNumberFor; use sp_runtime::{ @@ -134,7 +172,7 @@ use sp_runtime::{ ValidateUnsigned, Zero, }, transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, + ApplyExtrinsicResult, ExtrinsicInclusionMode, }; use sp_std::{marker::PhantomData, prelude::*}; @@ -198,7 +236,8 @@ impl< + OnInitialize> + OnIdle> + OnFinalize> - + OffchainWorker>, + + OffchainWorker> + + OnPoll>, COnRuntimeUpgrade: OnRuntimeUpgrade, > ExecuteBlock for Executive @@ -237,6 +276,7 @@ impl< + OnIdle> + OnFinalize> + OffchainWorker> + + OnPoll> + TryState> + TryDecodeEntireStorage, COnRuntimeUpgrade: OnRuntimeUpgrade, @@ -272,36 +312,50 @@ where select, ); - Self::initialize_block(block.header()); - Self::initial_checks(&block); - + let mode = Self::initialize_block(block.header()); + let num_inherents = Self::initial_checks(&block) as usize; let (header, extrinsics) = block.deconstruct(); + // Check if there are any forbidden non-inherents in the block. + if mode == ExtrinsicInclusionMode::OnlyInherents && extrinsics.len() > num_inherents { + return Err("Only inherents allowed".into()) + } + let try_apply_extrinsic = |uxt: Block::Extrinsic| -> ApplyExtrinsicResult { sp_io::init_tracing(); let encoded = uxt.encode(); let encoded_len = encoded.len(); + let is_inherent = System::is_inherent(&uxt); // skip signature verification. 
let xt = if signature_check { uxt.check(&Default::default()) } else { uxt.unchecked_into_checked_i_know_what_i_am_doing(&Default::default()) }?; - >::note_extrinsic(encoded); let dispatch_info = xt.get_dispatch_info(); + if !is_inherent && !>::inherents_applied() { + Self::inherents_applied(); + } + + >::note_extrinsic(encoded); let r = Applyable::apply::(xt, &dispatch_info, encoded_len)?; + if r.is_err() && dispatch_info.class == DispatchClass::Mandatory { + return Err(InvalidTransaction::BadMandatory.into()) + } + >::note_applied_extrinsic(&r, dispatch_info); Ok(r.map(|_| ()).map_err(|e| e.error)) }; - for e in extrinsics { + // Apply extrinsics: + for e in extrinsics.iter() { if let Err(err) = try_apply_extrinsic(e.clone()) { log::error!( - target: LOG_TARGET, "executing transaction {:?} failed due to {:?}. Aborting the rest of the block execution.", + target: LOG_TARGET, "transaction {:?} failed due to {:?}. Aborting the rest of the block execution.", e, err, ); @@ -309,9 +363,17 @@ where } } + // In this case there were no transactions to trigger this state transition: + if !>::inherents_applied() { + Self::inherents_applied(); + } + // post-extrinsics book-keeping >::note_finished_extrinsics(); - Self::idle_and_finalize_hook(*header.number()); + ::PostTransactions::post_transactions(); + + Self::on_idle_hook(*header.number()); + Self::on_finalize_hook(*header.number()); // run the try-state checks of all pallets, ensuring they don't alter any state. let _guard = frame_support::StorageNoopGuard::default(); @@ -449,7 +511,8 @@ impl< + OnInitialize> + OnIdle> + OnFinalize> - + OffchainWorker>, + + OffchainWorker> + + OnPoll>, COnRuntimeUpgrade: OnRuntimeUpgrade, > Executive where @@ -464,16 +527,36 @@ where pub fn execute_on_runtime_upgrade() -> Weight { let before_all_weight = ::before_all_runtime_migrations(); - <(COnRuntimeUpgrade, AllPalletsWithSystem) as OnRuntimeUpgrade>::on_runtime_upgrade() - .saturating_add(before_all_weight) + + let runtime_upgrade_weight = <( + COnRuntimeUpgrade, + ::SingleBlockMigrations, + // We want to run the migrations before we call into the pallets as they may + // access any state that would then not be migrated. + AllPalletsWithSystem, + ) as OnRuntimeUpgrade>::on_runtime_upgrade(); + + before_all_weight.saturating_add(runtime_upgrade_weight) } /// Start the execution of a particular block. - pub fn initialize_block(header: &frame_system::pallet_prelude::HeaderFor) { + pub fn initialize_block( + header: &frame_system::pallet_prelude::HeaderFor, + ) -> ExtrinsicInclusionMode { sp_io::init_tracing(); sp_tracing::enter_span!(sp_tracing::Level::TRACE, "init_block"); let digests = Self::extract_pre_digest(header); Self::initialize_block_impl(header.number(), header.parent_hash(), &digests); + + Self::extrinsic_mode() + } + + fn extrinsic_mode() -> ExtrinsicInclusionMode { + if ::MultiBlockMigrator::ongoing() { + ExtrinsicInclusionMode::OnlyInherents + } else { + ExtrinsicInclusionMode::AllExtrinsics + } } fn extract_pre_digest(header: &frame_system::pallet_prelude::HeaderFor) -> Digest { @@ -519,6 +602,7 @@ where ); frame_system::Pallet::::note_finished_initialize(); + ::PreInherents::pre_inherents(); } /// Returns if the runtime has been upgraded, based on [`frame_system::LastRuntimeUpgrade`]. @@ -529,7 +613,8 @@ where last.map(|v| v.was_upgraded(¤t)).unwrap_or(true) } - fn initial_checks(block: &Block) { + /// Returns the number of inherents in the block. 
+ fn initial_checks(block: &Block) -> u32 { sp_tracing::enter_span!(sp_tracing::Level::TRACE, "initial_checks"); let header = block.header(); @@ -542,8 +627,9 @@ where "Parent hash should be valid.", ); - if let Err(i) = System::ensure_inherents_are_first(block) { - panic!("Invalid inherent position for extrinsic at index {}", i); + match System::ensure_inherents_are_first(block) { + Ok(num) => num, + Err(i) => panic!("Invalid inherent position for extrinsic at index {}", i), } } @@ -552,53 +638,90 @@ where sp_io::init_tracing(); sp_tracing::within_span! { sp_tracing::info_span!("execute_block", ?block); + // Execute `on_runtime_upgrade` and `on_initialize`. + let mode = Self::initialize_block(block.header()); + let num_inherents = Self::initial_checks(&block) as usize; + let (header, extrinsics) = block.deconstruct(); + let num_extrinsics = extrinsics.len(); - Self::initialize_block(block.header()); + if mode == ExtrinsicInclusionMode::OnlyInherents && num_extrinsics > num_inherents { + // Invalid block + panic!("Only inherents are allowed in this block") + } - // any initial checks - Self::initial_checks(&block); + Self::apply_extrinsics(extrinsics.into_iter()); - // execute extrinsics - let (header, extrinsics) = block.deconstruct(); - Self::execute_extrinsics_with_book_keeping(extrinsics, *header.number()); + // In this case there were no transactions to trigger this state transition: + if !>::inherents_applied() { + defensive_assert!(num_inherents == num_extrinsics); + Self::inherents_applied(); + } - // any final checks + >::note_finished_extrinsics(); + ::PostTransactions::post_transactions(); + + Self::on_idle_hook(*header.number()); + Self::on_finalize_hook(*header.number()); Self::final_checks(&header); } } - /// Execute given extrinsics and take care of post-extrinsics book-keeping. - fn execute_extrinsics_with_book_keeping( - extrinsics: Vec, - block_number: NumberFor, - ) { + /// Logic that runs directly after inherent application. + /// + /// It advances the Multi-Block-Migrations or runs the `on_poll` hook. + pub fn inherents_applied() { + >::note_inherents_applied(); + ::PostInherents::post_inherents(); + + if ::MultiBlockMigrator::ongoing() { + let used_weight = ::MultiBlockMigrator::step(); + >::register_extra_weight_unchecked( + used_weight, + DispatchClass::Mandatory, + ); + } else { + let block_number = >::block_number(); + Self::on_poll_hook(block_number); + } + } + + /// Execute given extrinsics. + fn apply_extrinsics(extrinsics: impl Iterator) { extrinsics.into_iter().for_each(|e| { if let Err(e) = Self::apply_extrinsic(e) { let err: &'static str = e.into(); panic!("{}", err) } }); - - // post-extrinsics book-keeping - >::note_finished_extrinsics(); - - Self::idle_and_finalize_hook(block_number); } /// Finalize the block - it is up the caller to ensure that all header fields are valid /// except state-root. + // Note: Only used by the block builder - not Executive itself. 
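+	// A block builder typically drives `Executive` roughly like this (illustrative sketch only,
+	// not the API of any particular block builder):
+	//
+	//   let mode = Executive::initialize_block(&header);
+	//   // ... create and apply inherents, then, depending on `mode`, transactions ...
+	//   let new_header = Executive::finalize_block();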
pub fn finalize_block() -> frame_system::pallet_prelude::HeaderFor { sp_io::init_tracing(); sp_tracing::enter_span!(sp_tracing::Level::TRACE, "finalize_block"); - >::note_finished_extrinsics(); - let block_number = >::block_number(); - Self::idle_and_finalize_hook(block_number); + // In this case there were no transactions to trigger this state transition: + if !>::inherents_applied() { + Self::inherents_applied(); + } + >::note_finished_extrinsics(); + ::PostTransactions::post_transactions(); + let block_number = >::block_number(); + Self::on_idle_hook(block_number); + Self::on_finalize_hook(block_number); >::finalize() } - fn idle_and_finalize_hook(block_number: NumberFor) { + /// Run the `on_idle` hook of all pallet, but only if there is weight remaining and there are no + /// ongoing MBMs. + fn on_idle_hook(block_number: NumberFor) { + if ::MultiBlockMigrator::ongoing() { + return + } + let weight = >::block_weight(); let max_weight = >::get().max_block; let remaining_weight = max_weight.saturating_sub(weight.total()); @@ -613,7 +736,33 @@ where DispatchClass::Mandatory, ); } + } + + fn on_poll_hook(block_number: NumberFor) { + defensive_assert!( + !::MultiBlockMigrator::ongoing(), + "on_poll should not be called during migrations" + ); + let weight = >::block_weight(); + let max_weight = >::get().max_block; + let remaining = max_weight.saturating_sub(weight.total()); + + if remaining.all_gt(Weight::zero()) { + let mut meter = WeightMeter::with_limit(remaining); + >>::on_poll( + block_number, + &mut meter, + ); + >::register_extra_weight_unchecked( + meter.consumed(), + DispatchClass::Mandatory, + ); + } + } + + /// Run the `on_finalize` hook of all pallet. + fn on_finalize_hook(block_number: NumberFor) { >>::on_finalize(block_number); } @@ -627,8 +776,18 @@ where let encoded_len = encoded.len(); sp_tracing::enter_span!(sp_tracing::info_span!("apply_extrinsic", ext=?sp_core::hexdisplay::HexDisplay::from(&encoded))); + + // We use the dedicated `is_inherent` check here, since just relying on `Mandatory` dispatch + // class does not capture optional inherents. + let is_inherent = System::is_inherent(&uxt); + // Verify that the signature is good. let xt = uxt.check(&Default::default())?; + let dispatch_info = xt.get_dispatch_info(); + + if !is_inherent && !>::inherents_applied() { + Self::inherents_applied(); + } // We don't need to make sure to `note_extrinsic` only after we know it's going to be // executed to prevent it from leaking in storage since at this point, it will either @@ -637,8 +796,6 @@ where // AUDIT: Under no circumstances may this function panic from here onwards. - // Decode parameters and dispatch - let dispatch_info = xt.get_dispatch_info(); let r = Applyable::apply::(xt, &dispatch_info, encoded_len)?; // Mandatory(inherents) are not allowed to fail. 
@@ -745,956 +902,3 @@ where ) } } - -#[cfg(test)] -mod tests { - use super::*; - - use sp_core::H256; - use sp_runtime::{ - generic::{DigestItem, Era}, - testing::{Block, Digest, Header}, - traits::{BlakeTwo256, Block as BlockT, Header as HeaderT, IdentityLookup}, - transaction_validity::{ - InvalidTransaction, TransactionValidityError, UnknownTransaction, ValidTransaction, - }, - BuildStorage, DispatchError, - }; - - use frame_support::{ - assert_err, derive_impl, parameter_types, - traits::{fungible, ConstU32, ConstU64, ConstU8, Currency}, - weights::{ConstantMultiplier, IdentityFee, RuntimeDbWeight, Weight, WeightToFee}, - }; - use frame_system::{ChainContext, LastRuntimeUpgrade, LastRuntimeUpgradeInfo}; - use pallet_balances::Call as BalancesCall; - use pallet_transaction_payment::CurrencyAdapter; - - const TEST_KEY: &[u8] = b":test:key:"; - - #[frame_support::pallet(dev_mode)] - mod custom { - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - #[pallet::pallet] - pub struct Pallet(_); - - #[pallet::config] - pub trait Config: frame_system::Config {} - - #[pallet::hooks] - impl Hooks> for Pallet { - // module hooks. - // one with block number arg and one without - fn on_initialize(n: BlockNumberFor) -> Weight { - println!("on_initialize({})", n); - Weight::from_parts(175, 0) - } - - fn on_idle(n: BlockNumberFor, remaining_weight: Weight) -> Weight { - println!("on_idle{}, {})", n, remaining_weight); - Weight::from_parts(175, 0) - } - - fn on_finalize(n: BlockNumberFor) { - println!("on_finalize({})", n); - } - - fn on_runtime_upgrade() -> Weight { - sp_io::storage::set(super::TEST_KEY, "module".as_bytes()); - Weight::from_parts(200, 0) - } - - fn offchain_worker(n: BlockNumberFor) { - assert_eq!(BlockNumberFor::::from(1u32), n); - } - } - - #[pallet::call] - impl Pallet { - pub fn some_function(origin: OriginFor) -> DispatchResult { - // NOTE: does not make any different. - frame_system::ensure_signed(origin)?; - Ok(()) - } - - #[pallet::weight((200, DispatchClass::Operational))] - pub fn some_root_operation(origin: OriginFor) -> DispatchResult { - frame_system::ensure_root(origin)?; - Ok(()) - } - - pub fn some_unsigned_message(origin: OriginFor) -> DispatchResult { - frame_system::ensure_none(origin)?; - Ok(()) - } - - pub fn allowed_unsigned(origin: OriginFor) -> DispatchResult { - frame_system::ensure_root(origin)?; - Ok(()) - } - - pub fn unallowed_unsigned(origin: OriginFor) -> DispatchResult { - frame_system::ensure_root(origin)?; - Ok(()) - } - - #[pallet::weight((0, DispatchClass::Mandatory))] - pub fn inherent_call(origin: OriginFor) -> DispatchResult { - frame_system::ensure_none(origin)?; - Ok(()) - } - - pub fn calculate_storage_root(_origin: OriginFor) -> DispatchResult { - let root = sp_io::storage::root(sp_runtime::StateVersion::V1); - sp_io::storage::set("storage_root".as_bytes(), &root); - Ok(()) - } - } - - #[pallet::inherent] - impl ProvideInherent for Pallet { - type Call = Call; - - type Error = sp_inherents::MakeFatalError<()>; - - const INHERENT_IDENTIFIER: [u8; 8] = *b"test1234"; - - fn create_inherent(_data: &InherentData) -> Option { - None - } - - fn is_inherent(call: &Self::Call) -> bool { - *call == Call::::inherent_call {} - } - } - - #[pallet::validate_unsigned] - impl ValidateUnsigned for Pallet { - type Call = Call; - - // Inherent call is accepted for being dispatched - fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { - match call { - Call::allowed_unsigned { .. 
} => Ok(()), - Call::inherent_call { .. } => Ok(()), - _ => Err(UnknownTransaction::NoUnsignedValidator.into()), - } - } - - // Inherent call is not validated as unsigned - fn validate_unsigned( - _source: TransactionSource, - call: &Self::Call, - ) -> TransactionValidity { - match call { - Call::allowed_unsigned { .. } => Ok(Default::default()), - _ => UnknownTransaction::NoUnsignedValidator.into(), - } - } - } - } - - frame_support::construct_runtime!( - pub enum Runtime { - System: frame_system, - Balances: pallet_balances, - TransactionPayment: pallet_transaction_payment, - Custom: custom, - } - ); - - parameter_types! { - pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::builder() - .base_block(Weight::from_parts(10, 0)) - .for_class(DispatchClass::all(), |weights| weights.base_extrinsic = Weight::from_parts(5, 0)) - .for_class(DispatchClass::non_mandatory(), |weights| weights.max_total = Weight::from_parts(1024, u64::MAX).into()) - .build_or_panic(); - pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 10, - write: 100, - }; - } - #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] - impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = BlockWeights; - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; - type Hash = sp_core::H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Block = TestBlock; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = RuntimeVersion; - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; - } - - type Balance = u64; - impl pallet_balances::Config for Runtime { - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; - type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); - } - - parameter_types! { - pub const TransactionByteFee: Balance = 0; - } - impl pallet_transaction_payment::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type OnChargeTransaction = CurrencyAdapter; - type OperationalFeeMultiplier = ConstU8<5>; - type WeightToFee = IdentityFee; - type LengthToFee = ConstantMultiplier; - type FeeMultiplierUpdate = (); - } - impl custom::Config for Runtime {} - - pub struct RuntimeVersion; - impl frame_support::traits::Get for RuntimeVersion { - fn get() -> sp_version::RuntimeVersion { - RuntimeVersionTestValues::get().clone() - } - } - - parameter_types! { - pub static RuntimeVersionTestValues: sp_version::RuntimeVersion = - Default::default(); - } - - type SignedExtra = ( - frame_system::CheckEra, - frame_system::CheckNonce, - frame_system::CheckWeight, - pallet_transaction_payment::ChargeTransactionPayment, - ); - type TestXt = sp_runtime::testing::TestXt; - type TestBlock = Block; - - // Will contain `true` when the custom runtime logic was called. 
- const CUSTOM_ON_RUNTIME_KEY: &[u8] = b":custom:on_runtime"; - - struct CustomOnRuntimeUpgrade; - impl OnRuntimeUpgrade for CustomOnRuntimeUpgrade { - fn on_runtime_upgrade() -> Weight { - sp_io::storage::set(TEST_KEY, "custom_upgrade".as_bytes()); - sp_io::storage::set(CUSTOM_ON_RUNTIME_KEY, &true.encode()); - System::deposit_event(frame_system::Event::CodeUpdated); - - assert_eq!(0, System::last_runtime_upgrade_spec_version()); - - Weight::from_parts(100, 0) - } - } - - type Executive = super::Executive< - Runtime, - Block, - ChainContext, - Runtime, - AllPalletsWithSystem, - CustomOnRuntimeUpgrade, - >; - - fn extra(nonce: u64, fee: Balance) -> SignedExtra { - ( - frame_system::CheckEra::from(Era::Immortal), - frame_system::CheckNonce::from(nonce), - frame_system::CheckWeight::new(), - pallet_transaction_payment::ChargeTransactionPayment::from(fee), - ) - } - - fn sign_extra(who: u64, nonce: u64, fee: Balance) -> Option<(u64, SignedExtra)> { - Some((who, extra(nonce, fee))) - } - - fn call_transfer(dest: u64, value: u64) -> RuntimeCall { - RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest, value }) - } - - #[test] - fn balance_transfer_dispatch_works() { - let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - pallet_balances::GenesisConfig:: { balances: vec![(1, 211)] } - .assimilate_storage(&mut t) - .unwrap(); - let xt = TestXt::new(call_transfer(2, 69), sign_extra(1, 0, 0)); - let weight = xt.get_dispatch_info().weight + - ::BlockWeights::get() - .get(DispatchClass::Normal) - .base_extrinsic; - let fee: Balance = - ::WeightToFee::weight_to_fee(&weight); - let mut t = sp_io::TestExternalities::new(t); - t.execute_with(|| { - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - let r = Executive::apply_extrinsic(xt); - assert!(r.is_ok()); - assert_eq!(>::total_balance(&1), 142 - fee); - assert_eq!(>::total_balance(&2), 69); - }); - } - - fn new_test_ext(balance_factor: Balance) -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - pallet_balances::GenesisConfig:: { balances: vec![(1, 111 * balance_factor)] } - .assimilate_storage(&mut t) - .unwrap(); - t.into() - } - - fn new_test_ext_v0(balance_factor: Balance) -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - pallet_balances::GenesisConfig:: { balances: vec![(1, 111 * balance_factor)] } - .assimilate_storage(&mut t) - .unwrap(); - (t, sp_runtime::StateVersion::V0).into() - } - - #[test] - fn block_import_works() { - block_import_works_inner( - new_test_ext_v0(1), - array_bytes::hex_n_into_unchecked( - "65e953676859e7a33245908af7ad3637d6861eb90416d433d485e95e2dd174a1", - ), - ); - block_import_works_inner( - new_test_ext(1), - array_bytes::hex_n_into_unchecked( - "5a19b3d6fdb7241836349fdcbe2d9df4d4f945b949d979e31ad50bff1cbcd1c2", - ), - ); - } - fn block_import_works_inner(mut ext: sp_io::TestExternalities, state_root: H256) { - ext.execute_with(|| { - Executive::execute_block(Block { - header: Header { - parent_hash: [69u8; 32].into(), - number: 1, - state_root, - extrinsics_root: array_bytes::hex_n_into_unchecked( - "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314", - ), - digest: Digest { logs: vec![] }, - }, - extrinsics: vec![], - }); - }); - } - - #[test] - #[should_panic] - fn block_import_of_bad_state_root_fails() { - new_test_ext(1).execute_with(|| { - 
Executive::execute_block(Block { - header: Header { - parent_hash: [69u8; 32].into(), - number: 1, - state_root: [0u8; 32].into(), - extrinsics_root: array_bytes::hex_n_into_unchecked( - "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314", - ), - digest: Digest { logs: vec![] }, - }, - extrinsics: vec![], - }); - }); - } - - #[test] - #[should_panic] - fn block_import_of_bad_extrinsic_root_fails() { - new_test_ext(1).execute_with(|| { - Executive::execute_block(Block { - header: Header { - parent_hash: [69u8; 32].into(), - number: 1, - state_root: array_bytes::hex_n_into_unchecked( - "75e7d8f360d375bbe91bcf8019c01ab6362448b4a89e3b329717eb9d910340e5", - ), - extrinsics_root: [0u8; 32].into(), - digest: Digest { logs: vec![] }, - }, - extrinsics: vec![], - }); - }); - } - - #[test] - fn bad_extrinsic_not_inserted() { - let mut t = new_test_ext(1); - // bad nonce check! - let xt = TestXt::new(call_transfer(33, 69), sign_extra(1, 30, 0)); - t.execute_with(|| { - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - assert_err!( - Executive::apply_extrinsic(xt), - TransactionValidityError::Invalid(InvalidTransaction::Future) - ); - assert_eq!(>::extrinsic_index(), Some(0)); - }); - } - - #[test] - fn block_weight_limit_enforced() { - let mut t = new_test_ext(10000); - // given: TestXt uses the encoded len as fixed Len: - let xt = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 0, 0), - ); - let encoded = xt.encode(); - let encoded_len = encoded.len() as u64; - // on_initialize weight + base block execution weight - let block_weights = ::BlockWeights::get(); - let base_block_weight = Weight::from_parts(175, 0) + block_weights.base_block; - let limit = block_weights.get(DispatchClass::Normal).max_total.unwrap() - base_block_weight; - let num_to_exhaust_block = limit.ref_time() / (encoded_len + 5); - t.execute_with(|| { - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - // Base block execution weight + `on_initialize` weight from the custom module. 
- assert_eq!(>::block_weight().total(), base_block_weight); - - for nonce in 0..=num_to_exhaust_block { - let xt = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer_allow_death { - dest: 33, - value: 0, - }), - sign_extra(1, nonce.into(), 0), - ); - let res = Executive::apply_extrinsic(xt); - if nonce != num_to_exhaust_block { - assert!(res.is_ok()); - assert_eq!( - >::block_weight().total(), - //--------------------- on_initialize + block_execution + extrinsic_base weight - Weight::from_parts((encoded_len + 5) * (nonce + 1), 0) + base_block_weight, - ); - assert_eq!( - >::extrinsic_index(), - Some(nonce as u32 + 1) - ); - } else { - assert_eq!(res, Err(InvalidTransaction::ExhaustsResources.into())); - } - } - }); - } - - #[test] - fn block_weight_and_size_is_stored_per_tx() { - let xt = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 0, 0), - ); - let x1 = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 1, 0), - ); - let x2 = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 2, 0), - ); - let len = xt.clone().encode().len() as u32; - let mut t = new_test_ext(1); - t.execute_with(|| { - // Block execution weight + on_initialize weight from custom module - let base_block_weight = Weight::from_parts(175, 0) + - ::BlockWeights::get().base_block; - - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - assert_eq!(>::block_weight().total(), base_block_weight); - assert_eq!(>::all_extrinsics_len(), 0); - - assert!(Executive::apply_extrinsic(xt.clone()).unwrap().is_ok()); - assert!(Executive::apply_extrinsic(x1.clone()).unwrap().is_ok()); - assert!(Executive::apply_extrinsic(x2.clone()).unwrap().is_ok()); - - // default weight for `TestXt` == encoded length. 
- let extrinsic_weight = Weight::from_parts(len as u64, 0) + - ::BlockWeights::get() - .get(DispatchClass::Normal) - .base_extrinsic; - assert_eq!( - >::block_weight().total(), - base_block_weight + 3u64 * extrinsic_weight, - ); - assert_eq!(>::all_extrinsics_len(), 3 * len); - - let _ = >::finalize(); - // All extrinsics length cleaned on `System::finalize` - assert_eq!(>::all_extrinsics_len(), 0); - - // New Block - Executive::initialize_block(&Header::new( - 2, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - // Block weight cleaned up on `System::initialize` - assert_eq!(>::block_weight().total(), base_block_weight); - }); - } - - #[test] - fn validate_unsigned() { - let valid = TestXt::new(RuntimeCall::Custom(custom::Call::allowed_unsigned {}), None); - let invalid = TestXt::new(RuntimeCall::Custom(custom::Call::unallowed_unsigned {}), None); - let mut t = new_test_ext(1); - - t.execute_with(|| { - assert_eq!( - Executive::validate_transaction( - TransactionSource::InBlock, - valid.clone(), - Default::default(), - ), - Ok(ValidTransaction::default()), - ); - assert_eq!( - Executive::validate_transaction( - TransactionSource::InBlock, - invalid.clone(), - Default::default(), - ), - Err(TransactionValidityError::Unknown(UnknownTransaction::NoUnsignedValidator)), - ); - assert_eq!(Executive::apply_extrinsic(valid), Ok(Err(DispatchError::BadOrigin))); - assert_eq!( - Executive::apply_extrinsic(invalid), - Err(TransactionValidityError::Unknown(UnknownTransaction::NoUnsignedValidator)) - ); - }); - } - - #[test] - fn can_not_pay_for_tx_fee_on_full_lock() { - let mut t = new_test_ext(1); - t.execute_with(|| { - as fungible::MutateFreeze>::set_freeze( - &(), - &1, - 110, - ) - .unwrap(); - let xt = TestXt::new( - RuntimeCall::System(frame_system::Call::remark { remark: vec![1u8] }), - sign_extra(1, 0, 0), - ); - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - assert_eq!(Executive::apply_extrinsic(xt), Err(InvalidTransaction::Payment.into()),); - assert_eq!(>::total_balance(&1), 111); - }); - } - - #[test] - fn block_hooks_weight_is_stored() { - new_test_ext(1).execute_with(|| { - Executive::initialize_block(&Header::new_from_number(1)); - Executive::finalize_block(); - // NOTE: might need updates over time if new weights are introduced. - // For now it only accounts for the base block execution weight and - // the `on_initialize` weight defined in the custom test module. 
- assert_eq!( - >::block_weight().total(), - Weight::from_parts(175 + 175 + 10, 0) - ); - }) - } - - #[test] - fn runtime_upgraded_should_work() { - new_test_ext(1).execute_with(|| { - RuntimeVersionTestValues::mutate(|v| *v = Default::default()); - // It should be added at genesis - assert!(LastRuntimeUpgrade::::exists()); - assert!(!Executive::runtime_upgraded()); - - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } - }); - assert!(Executive::runtime_upgraded()); - - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { - spec_version: 1, - spec_name: "test".into(), - ..Default::default() - } - }); - assert!(Executive::runtime_upgraded()); - - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { - spec_version: 0, - impl_version: 2, - ..Default::default() - } - }); - assert!(!Executive::runtime_upgraded()); - - LastRuntimeUpgrade::::take(); - assert!(Executive::runtime_upgraded()); - }) - } - - #[test] - fn last_runtime_upgrade_was_upgraded_works() { - let test_data = vec![ - (0, "", 1, "", true), - (1, "", 1, "", false), - (1, "", 1, "test", true), - (1, "", 0, "", false), - (1, "", 0, "test", true), - ]; - - for (spec_version, spec_name, c_spec_version, c_spec_name, result) in test_data { - let current = sp_version::RuntimeVersion { - spec_version: c_spec_version, - spec_name: c_spec_name.into(), - ..Default::default() - }; - - let last = LastRuntimeUpgradeInfo { - spec_version: spec_version.into(), - spec_name: spec_name.into(), - }; - - assert_eq!(result, last.was_upgraded(¤t)); - } - } - - #[test] - fn custom_runtime_upgrade_is_called_before_modules() { - new_test_ext(1).execute_with(|| { - // Make sure `on_runtime_upgrade` is called. - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } - }); - - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - assert_eq!(&sp_io::storage::get(TEST_KEY).unwrap()[..], *b"module"); - assert_eq!(sp_io::storage::get(CUSTOM_ON_RUNTIME_KEY).unwrap(), true.encode()); - assert_eq!( - Some(RuntimeVersionTestValues::get().into()), - LastRuntimeUpgrade::::get(), - ) - }); - } - - #[test] - fn event_from_runtime_upgrade_is_included() { - new_test_ext(1).execute_with(|| { - // Make sure `on_runtime_upgrade` is called. - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } - }); - - // set block number to non zero so events are not excluded - System::set_block_number(1); - - Executive::initialize_block(&Header::new( - 2, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - System::assert_last_event(frame_system::Event::::CodeUpdated.into()); - }); - } - - /// Regression test that ensures that the custom on runtime upgrade is called when executive is - /// used through the `ExecuteBlock` trait. - #[test] - fn custom_runtime_upgrade_is_called_when_using_execute_block_trait() { - let xt = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 0, 0), - ); - - let header = new_test_ext(1).execute_with(|| { - // Make sure `on_runtime_upgrade` is called. - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } - }); - - // Let's build some fake block. 
- Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - Executive::apply_extrinsic(xt.clone()).unwrap().unwrap(); - - Executive::finalize_block() - }); - - // Reset to get the correct new genesis below. - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 0, ..Default::default() } - }); - - new_test_ext(1).execute_with(|| { - // Make sure `on_runtime_upgrade` is called. - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } - }); - - >>::execute_block(Block::new(header, vec![xt])); - - assert_eq!(&sp_io::storage::get(TEST_KEY).unwrap()[..], *b"module"); - assert_eq!(sp_io::storage::get(CUSTOM_ON_RUNTIME_KEY).unwrap(), true.encode()); - }); - } - - #[test] - fn all_weights_are_recorded_correctly() { - // Reset to get the correct new genesis below. - RuntimeVersionTestValues::take(); - - new_test_ext(1).execute_with(|| { - // Make sure `on_runtime_upgrade` is called for maximum complexity - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } - }); - - let block_number = 1; - - Executive::initialize_block(&Header::new( - block_number, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - // Reset the last runtime upgrade info, to make the second call to `on_runtime_upgrade` - // succeed. - LastRuntimeUpgrade::::take(); - - // All weights that show up in the `initialize_block_impl` - let custom_runtime_upgrade_weight = CustomOnRuntimeUpgrade::on_runtime_upgrade(); - let runtime_upgrade_weight = - ::on_runtime_upgrade(); - let on_initialize_weight = - >::on_initialize(block_number); - let base_block_weight = - ::BlockWeights::get().base_block; - - // Weights are recorded correctly - assert_eq!( - frame_system::Pallet::::block_weight().total(), - custom_runtime_upgrade_weight + - runtime_upgrade_weight + - on_initialize_weight + base_block_weight, - ); - }); - } - - #[test] - fn offchain_worker_works_as_expected() { - new_test_ext(1).execute_with(|| { - let parent_hash = sp_core::H256::from([69u8; 32]); - let mut digest = Digest::default(); - digest.push(DigestItem::Seal([1, 2, 3, 4], vec![5, 6, 7, 8])); - - let header = - Header::new(1, H256::default(), H256::default(), parent_hash, digest.clone()); - - Executive::offchain_worker(&header); - - assert_eq!(digest, System::digest()); - assert_eq!(parent_hash, System::block_hash(0)); - assert_eq!(header.hash(), System::block_hash(1)); - }); - } - - #[test] - fn calculating_storage_root_twice_works() { - let call = RuntimeCall::Custom(custom::Call::calculate_storage_root {}); - let xt = TestXt::new(call, sign_extra(1, 0, 0)); - - let header = new_test_ext(1).execute_with(|| { - // Let's build some fake block. 
- Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - Executive::apply_extrinsic(xt.clone()).unwrap().unwrap(); - - Executive::finalize_block() - }); - - new_test_ext(1).execute_with(|| { - Executive::execute_block(Block::new(header, vec![xt])); - }); - } - - #[test] - #[should_panic(expected = "Invalid inherent position for extrinsic at index 1")] - fn invalid_inherent_position_fail() { - let xt1 = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 0, 0), - ); - let xt2 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent_call {}), None); - - let header = new_test_ext(1).execute_with(|| { - // Let's build some fake block. - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); - Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); - - Executive::finalize_block() - }); - - new_test_ext(1).execute_with(|| { - Executive::execute_block(Block::new(header, vec![xt1, xt2])); - }); - } - - #[test] - fn valid_inherents_position_works() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent_call {}), None); - let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); - - let header = new_test_ext(1).execute_with(|| { - // Let's build some fake block. - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); - Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); - - Executive::finalize_block() - }); - - new_test_ext(1).execute_with(|| { - Executive::execute_block(Block::new(header, vec![xt1, xt2])); - }); - } - - #[test] - #[should_panic(expected = "A call was labelled as mandatory, but resulted in an Error.")] - fn invalid_inherents_fail_block_execution() { - let xt1 = - TestXt::new(RuntimeCall::Custom(custom::Call::inherent_call {}), sign_extra(1, 0, 0)); - - new_test_ext(1).execute_with(|| { - Executive::execute_block(Block::new( - Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - ), - vec![xt1], - )); - }); - } - - // Inherents are created by the runtime and don't need to be validated. - #[test] - fn inherents_fail_validate_block() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent_call {}), None); - - new_test_ext(1).execute_with(|| { - assert_eq!( - Executive::validate_transaction(TransactionSource::External, xt1, H256::random()) - .unwrap_err(), - InvalidTransaction::MandatoryValidation.into() - ); - }) - } -} diff --git a/substrate/frame/executive/src/tests.rs b/substrate/frame/executive/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..a3f70a9fc3c2a081b52a08f3e9a8a617ab75c7a9 --- /dev/null +++ b/substrate/frame/executive/src/tests.rs @@ -0,0 +1,1476 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test the `frame-executive` crate. + +use super::*; + +use sp_core::H256; +use sp_runtime::{ + generic::{DigestItem, Era}, + testing::{Block, Digest, Header}, + traits::{Block as BlockT, Header as HeaderT}, + transaction_validity::{ + InvalidTransaction, TransactionValidityError, UnknownTransaction, ValidTransaction, + }, + BuildStorage, DispatchError, +}; + +use frame_support::{ + assert_err, assert_ok, derive_impl, + migrations::MultiStepMigrator, + pallet_prelude::*, + parameter_types, + traits::{fungible, ConstU8, Currency, IsInherent}, + weights::{ConstantMultiplier, IdentityFee, RuntimeDbWeight, Weight, WeightMeter, WeightToFee}, +}; +use frame_system::{pallet_prelude::*, ChainContext, LastRuntimeUpgrade, LastRuntimeUpgradeInfo}; +use pallet_balances::Call as BalancesCall; +use pallet_transaction_payment::CurrencyAdapter; + +const TEST_KEY: &[u8] = b":test:key:"; + +#[frame_support::pallet(dev_mode)] +mod custom { + use super::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::hooks] + impl Hooks> for Pallet { + // module hooks. + // one with block number arg and one without + fn on_initialize(_: BlockNumberFor) -> Weight { + Weight::from_parts(175, 0) + } + + fn on_idle(_: BlockNumberFor, _: Weight) -> Weight { + Weight::from_parts(175, 0) + } + + fn on_poll(_: BlockNumberFor, _: &mut WeightMeter) {} + + fn on_finalize(_: BlockNumberFor) {} + + fn on_runtime_upgrade() -> Weight { + sp_io::storage::set(super::TEST_KEY, "module".as_bytes()); + Weight::from_parts(200, 0) + } + + fn offchain_worker(n: BlockNumberFor) { + assert_eq!(BlockNumberFor::::from(1u32), n); + } + } + + #[pallet::call] + impl Pallet { + pub fn some_function(origin: OriginFor) -> DispatchResult { + // NOTE: does not make any difference. 
+ frame_system::ensure_signed(origin)?; + Ok(()) + } + + #[pallet::weight((200, DispatchClass::Operational))] + pub fn some_root_operation(origin: OriginFor) -> DispatchResult { + frame_system::ensure_root(origin)?; + Ok(()) + } + + pub fn some_unsigned_message(origin: OriginFor) -> DispatchResult { + frame_system::ensure_none(origin)?; + Ok(()) + } + + pub fn allowed_unsigned(origin: OriginFor) -> DispatchResult { + frame_system::ensure_root(origin)?; + Ok(()) + } + + pub fn unallowed_unsigned(origin: OriginFor) -> DispatchResult { + frame_system::ensure_root(origin)?; + Ok(()) + } + + #[pallet::weight((0, DispatchClass::Mandatory))] + pub fn inherent(origin: OriginFor) -> DispatchResult { + frame_system::ensure_none(origin)?; + Ok(()) + } + + pub fn calculate_storage_root(_origin: OriginFor) -> DispatchResult { + let root = sp_io::storage::root(sp_runtime::StateVersion::V1); + sp_io::storage::set("storage_root".as_bytes(), &root); + Ok(()) + } + } + + #[pallet::inherent] + impl ProvideInherent for Pallet { + type Call = Call; + + type Error = sp_inherents::MakeFatalError<()>; + + const INHERENT_IDENTIFIER: [u8; 8] = *b"test1234"; + + fn create_inherent(_data: &InherentData) -> Option { + None + } + + fn is_inherent(call: &Self::Call) -> bool { + *call == Call::::inherent {} + } + } + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + + // Inherent call is accepted for being dispatched + fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { + match call { + Call::allowed_unsigned { .. } => Ok(()), + Call::inherent { .. } => Ok(()), + _ => Err(UnknownTransaction::NoUnsignedValidator.into()), + } + } + + // Inherent call is not validated as unsigned + fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { + match call { + Call::allowed_unsigned { .. } => Ok(Default::default()), + _ => UnknownTransaction::NoUnsignedValidator.into(), + } + } + } +} + +#[frame_support::pallet(dev_mode)] +mod custom2 { + use super::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::hooks] + impl Hooks> for Pallet { + // module hooks. 
+ // one with block number arg and one without + fn on_initialize(_: BlockNumberFor) -> Weight { + assert!( + !MockedSystemCallbacks::pre_inherent_called(), + "Pre inherent hook goes after on_initialize" + ); + + Weight::from_parts(0, 0) + } + + fn on_idle(_: BlockNumberFor, _: Weight) -> Weight { + assert!( + MockedSystemCallbacks::post_transactions_called(), + "Post transactions hook goes before after on_idle" + ); + Weight::from_parts(0, 0) + } + + fn on_finalize(_: BlockNumberFor) { + assert!( + MockedSystemCallbacks::post_transactions_called(), + "Post transactions hook goes before after on_finalize" + ); + } + + fn on_runtime_upgrade() -> Weight { + sp_io::storage::set(super::TEST_KEY, "module".as_bytes()); + Weight::from_parts(0, 0) + } + } + + #[pallet::call] + impl Pallet { + pub fn allowed_unsigned(origin: OriginFor) -> DispatchResult { + frame_system::ensure_root(origin)?; + Ok(()) + } + + pub fn some_call(_: OriginFor) -> DispatchResult { + assert!(MockedSystemCallbacks::post_inherent_called()); + assert!(!MockedSystemCallbacks::post_transactions_called()); + assert!(System::inherents_applied()); + + Ok(()) + } + + #[pallet::weight({0})] + pub fn optional_inherent(origin: OriginFor) -> DispatchResult { + frame_system::ensure_none(origin)?; + + assert!(MockedSystemCallbacks::pre_inherent_called()); + assert!(!MockedSystemCallbacks::post_inherent_called(), "Should not already be called"); + assert!(!System::inherents_applied()); + + Ok(()) + } + + #[pallet::weight((0, DispatchClass::Mandatory))] + pub fn inherent(origin: OriginFor) -> DispatchResult { + frame_system::ensure_none(origin)?; + + assert!(MockedSystemCallbacks::pre_inherent_called()); + assert!(!MockedSystemCallbacks::post_inherent_called(), "Should not already be called"); + assert!(!System::inherents_applied()); + + Ok(()) + } + } + + #[pallet::inherent] + impl ProvideInherent for Pallet { + type Call = Call; + + type Error = sp_inherents::MakeFatalError<()>; + + const INHERENT_IDENTIFIER: [u8; 8] = *b"test1235"; + + fn create_inherent(_data: &InherentData) -> Option { + None + } + + fn is_inherent(call: &Self::Call) -> bool { + matches!(call, Call::::inherent {} | Call::::optional_inherent {}) + } + } + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + + // Inherent call is accepted for being dispatched + fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { + match call { + Call::allowed_unsigned { .. } | + Call::optional_inherent { .. } | + Call::inherent { .. } => Ok(()), + _ => Err(UnknownTransaction::NoUnsignedValidator.into()), + } + } + + // Inherent call is not validated as unsigned + fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { + match call { + Call::allowed_unsigned { .. } => Ok(Default::default()), + _ => UnknownTransaction::NoUnsignedValidator.into(), + } + } + } +} + +frame_support::construct_runtime!( + pub struct Runtime + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event}, + Custom: custom::{Pallet, Call, ValidateUnsigned, Inherent}, + Custom2: custom2::{Pallet, Call, ValidateUnsigned, Inherent}, + } +); + +parameter_types! 
{ + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::builder() + .base_block(Weight::from_parts(10, 0)) + .for_class(DispatchClass::all(), |weights| weights.base_extrinsic = Weight::from_parts(5, 0)) + .for_class(DispatchClass::non_mandatory(), |weights| weights.max_total = Weight::from_parts(1024, u64::MAX).into()) + .build_or_panic(); + pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 10, + write: 100, + }; +} + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { + type BlockWeights = BlockWeights; + type RuntimeOrigin = RuntimeOrigin; + type Nonce = u64; + type RuntimeCall = RuntimeCall; + type Block = TestBlock; + type RuntimeEvent = RuntimeEvent; + type Version = RuntimeVersion; + type AccountData = pallet_balances::AccountData; + type PreInherents = MockedSystemCallbacks; + type PostInherents = MockedSystemCallbacks; + type PostTransactions = MockedSystemCallbacks; + type MultiBlockMigrator = MockedModeGetter; +} + +type Balance = u64; + +pub struct BalancesWeights; +impl pallet_balances::WeightInfo for BalancesWeights { + fn transfer_allow_death() -> Weight { + Weight::from_parts(25, 0) + } + fn transfer_keep_alive() -> Weight { + Weight::zero() + } + fn force_set_balance_creating() -> Weight { + Weight::zero() + } + fn force_set_balance_killing() -> Weight { + Weight::zero() + } + fn force_transfer() -> Weight { + Weight::zero() + } + fn transfer_all() -> Weight { + Weight::zero() + } + fn force_unreserve() -> Weight { + Weight::zero() + } + fn upgrade_accounts(_u: u32) -> Weight { + Weight::zero() + } + fn force_adjust_total_issuance() -> Weight { + Weight::zero() + } +} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] +impl pallet_balances::Config for Runtime { + type Balance = Balance; + type AccountStore = System; + type WeightInfo = BalancesWeights; +} + +parameter_types! { + pub const TransactionByteFee: Balance = 0; +} +impl pallet_transaction_payment::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type OnChargeTransaction = CurrencyAdapter; + type OperationalFeeMultiplier = ConstU8<5>; + type WeightToFee = IdentityFee; + type LengthToFee = ConstantMultiplier; + type FeeMultiplierUpdate = (); + type WeightInfo = (); +} + +impl custom::Config for Runtime {} +impl custom2::Config for Runtime {} + +pub struct RuntimeVersion; +impl frame_support::traits::Get for RuntimeVersion { + fn get() -> sp_version::RuntimeVersion { + RuntimeVersionTestValues::get().clone() + } +} + +#[derive(Clone, Debug, Encode, codec::Decode, PartialEq, Eq, scale_info::TypeInfo)] +pub struct AccountU64(u64); +impl sp_runtime::traits::IdentifyAccount for AccountU64 { + type AccountId = u64; + fn into_account(self) -> u64 { + self.0 + } +} + +impl sp_runtime::traits::Verify for AccountU64 { + type Signer = AccountU64; + fn verify>( + &self, + _msg: L, + _signer: &::AccountId, + ) -> bool { + true + } +} + +impl From for AccountU64 { + fn from(value: u64) -> Self { + Self(value) + } +} + +parameter_types! 
{ + pub static RuntimeVersionTestValues: sp_version::RuntimeVersion = + Default::default(); +} + +type TxExtension = ( + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment, +); +type UncheckedXt = + sp_runtime::generic::UncheckedExtrinsic; +type TestBlock = Block; + +// Will contain `true` when the custom runtime logic was called. +const CUSTOM_ON_RUNTIME_KEY: &[u8] = b":custom:on_runtime"; + +struct CustomOnRuntimeUpgrade; +impl OnRuntimeUpgrade for CustomOnRuntimeUpgrade { + fn on_runtime_upgrade() -> Weight { + sp_io::storage::set(TEST_KEY, "custom_upgrade".as_bytes()); + sp_io::storage::set(CUSTOM_ON_RUNTIME_KEY, &true.encode()); + System::deposit_event(frame_system::Event::CodeUpdated); + + assert_eq!(0, System::last_runtime_upgrade_spec_version()); + + Weight::from_parts(100, 0) + } +} + +type Executive = super::Executive< + Runtime, + Block, + ChainContext, + Runtime, + AllPalletsWithSystem, + CustomOnRuntimeUpgrade, +>; + +parameter_types! { + pub static SystemCallbacksCalled: u32 = 0; +} + +pub struct MockedSystemCallbacks; +impl PreInherents for MockedSystemCallbacks { + fn pre_inherents() { + assert_eq!(SystemCallbacksCalled::get(), 0); + SystemCallbacksCalled::set(1); + // Change the storage to modify the root hash: + frame_support::storage::unhashed::put(b":pre_inherent", b"0"); + } +} + +impl PostInherents for MockedSystemCallbacks { + fn post_inherents() { + assert_eq!(SystemCallbacksCalled::get(), 1); + SystemCallbacksCalled::set(2); + // Change the storage to modify the root hash: + frame_support::storage::unhashed::put(b":post_inherent", b"0"); + } +} + +impl PostTransactions for MockedSystemCallbacks { + fn post_transactions() { + assert_eq!(SystemCallbacksCalled::get(), 2); + SystemCallbacksCalled::set(3); + // Change the storage to modify the root hash: + frame_support::storage::unhashed::put(b":post_transaction", b"0"); + } +} + +impl MockedSystemCallbacks { + fn pre_inherent_called() -> bool { + SystemCallbacksCalled::get() >= 1 + } + + fn post_inherent_called() -> bool { + SystemCallbacksCalled::get() >= 2 + } + + fn post_transactions_called() -> bool { + SystemCallbacksCalled::get() >= 3 + } + + fn reset() { + SystemCallbacksCalled::set(0); + frame_support::storage::unhashed::kill(b":pre_inherent"); + frame_support::storage::unhashed::kill(b":post_inherent"); + frame_support::storage::unhashed::kill(b":post_transaction"); + } +} + +parameter_types! 
{ + pub static MbmActive: bool = false; +} + +pub struct MockedModeGetter; +impl MultiStepMigrator for MockedModeGetter { + fn ongoing() -> bool { + MbmActive::get() + } + + fn step() -> Weight { + Weight::zero() + } +} + +fn tx_ext(nonce: u64, fee: Balance) -> TxExtension { + ( + frame_system::CheckEra::from(Era::Immortal), + frame_system::CheckNonce::from(nonce), + frame_system::CheckWeight::new(), + pallet_transaction_payment::ChargeTransactionPayment::from(fee), + ) + .into() +} + +fn call_transfer(dest: u64, value: u64) -> RuntimeCall { + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest, value }) +} + +#[test] +fn balance_transfer_dispatch_works() { + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + pallet_balances::GenesisConfig:: { balances: vec![(1, 211)] } + .assimilate_storage(&mut t) + .unwrap(); + let xt = UncheckedXt::new_signed(call_transfer(2, 69), 1, 1.into(), tx_ext(0, 0)); + let weight = xt.get_dispatch_info().weight + + ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic; + let fee: Balance = + ::WeightToFee::weight_to_fee(&weight); + let mut t = sp_io::TestExternalities::new(t); + t.execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + let r = Executive::apply_extrinsic(xt); + assert!(r.is_ok()); + assert_eq!(>::total_balance(&1), 142 - fee); + assert_eq!(>::total_balance(&2), 69); + }); +} + +fn new_test_ext(balance_factor: Balance) -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + pallet_balances::GenesisConfig:: { balances: vec![(1, 111 * balance_factor)] } + .assimilate_storage(&mut t) + .unwrap(); + let mut ext: sp_io::TestExternalities = t.into(); + ext.execute_with(|| { + SystemCallbacksCalled::set(0); + }); + ext +} + +fn new_test_ext_v0(balance_factor: Balance) -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + pallet_balances::GenesisConfig:: { balances: vec![(1, 111 * balance_factor)] } + .assimilate_storage(&mut t) + .unwrap(); + (t, sp_runtime::StateVersion::V0).into() +} + +#[test] +fn block_import_works() { + block_import_works_inner( + new_test_ext_v0(1), + array_bytes::hex_n_into_unchecked( + "4826d3bdf87dbbc883d2ab274cbe272f58ed94a904619b59953e48294d1142d2", + ), + ); + block_import_works_inner( + new_test_ext(1), + array_bytes::hex_n_into_unchecked( + "d6b465f5a50c9f8d5a6edc0f01d285a6b19030f097d3aaf1649b7be81649f118", + ), + ); +} +fn block_import_works_inner(mut ext: sp_io::TestExternalities, state_root: H256) { + ext.execute_with(|| { + Executive::execute_block(Block { + header: Header { + parent_hash: [69u8; 32].into(), + number: 1, + state_root, + extrinsics_root: array_bytes::hex_n_into_unchecked( + "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314", + ), + digest: Digest { logs: vec![] }, + }, + extrinsics: vec![], + }); + }); +} + +#[test] +#[should_panic] +fn block_import_of_bad_state_root_fails() { + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block { + header: Header { + parent_hash: [69u8; 32].into(), + number: 1, + state_root: [0u8; 32].into(), + extrinsics_root: array_bytes::hex_n_into_unchecked( + "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314", + ), + digest: Digest { logs: vec![] }, + }, + extrinsics: vec![], + }); + }); +} + +#[test] +#[should_panic] +fn block_import_of_bad_extrinsic_root_fails() { + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block { + 
header: Header { + parent_hash: [69u8; 32].into(), + number: 1, + state_root: array_bytes::hex_n_into_unchecked( + "75e7d8f360d375bbe91bcf8019c01ab6362448b4a89e3b329717eb9d910340e5", + ), + extrinsics_root: [0u8; 32].into(), + digest: Digest { logs: vec![] }, + }, + extrinsics: vec![], + }); + }); +} + +#[test] +fn bad_extrinsic_not_inserted() { + let mut t = new_test_ext(1); + // bad nonce check! + let xt = UncheckedXt::new_signed(call_transfer(33, 69), 1, 1.into(), tx_ext(30, 0)); + t.execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + assert_err!( + Executive::apply_extrinsic(xt), + TransactionValidityError::Invalid(InvalidTransaction::Future) + ); + assert_eq!(>::extrinsic_index(), Some(0)); + }); +} + +#[test] +fn block_weight_limit_enforced() { + let mut t = new_test_ext(10000); + let transfer_weight = + <::WeightInfo as pallet_balances::WeightInfo>::transfer_allow_death(); + // on_initialize weight + base block execution weight + let block_weights = ::BlockWeights::get(); + let base_block_weight = Weight::from_parts(175, 0) + block_weights.base_block; + let limit = block_weights.get(DispatchClass::Normal).max_total.unwrap() - base_block_weight; + let num_to_exhaust_block = limit.ref_time() / (transfer_weight.ref_time() + 5); + t.execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + // Base block execution weight + `on_initialize` weight from the custom module. + assert_eq!(>::block_weight().total(), base_block_weight); + + for nonce in 0..=num_to_exhaust_block { + let xt = UncheckedXt::new_signed( + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), + 1, + 1.into(), + tx_ext(nonce.into(), 0), + ); + let res = Executive::apply_extrinsic(xt); + if nonce != num_to_exhaust_block { + assert!(res.is_ok()); + assert_eq!( + >::block_weight().total(), + //--------------------- on_initialize + block_execution + extrinsic_base weight + Weight::from_parts((transfer_weight.ref_time() + 5) * (nonce + 1), 0) + + base_block_weight, + ); + assert_eq!( + >::extrinsic_index(), + Some(nonce as u32 + 1) + ); + } else { + assert_eq!(res, Err(InvalidTransaction::ExhaustsResources.into())); + } + } + }); +} + +#[test] +fn block_weight_and_size_is_stored_per_tx() { + let xt = UncheckedXt::new_signed( + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), + 1, + 1.into(), + tx_ext(0, 0), + ); + let x1 = UncheckedXt::new_signed( + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), + 1, + 1.into(), + tx_ext(1, 0), + ); + let x2 = UncheckedXt::new_signed( + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), + 1, + 1.into(), + tx_ext(2, 0), + ); + let len = xt.clone().encode().len() as u32; + let transfer_weight = <::WeightInfo as pallet_balances::WeightInfo>::transfer_allow_death(); + let mut t = new_test_ext(1); + t.execute_with(|| { + // Block execution weight + on_initialize weight from custom module + let base_block_weight = Weight::from_parts(175, 0) + + ::BlockWeights::get().base_block; + + Executive::initialize_block(&Header::new_from_number(1)); + + assert_eq!(>::block_weight().total(), base_block_weight); + assert_eq!(>::all_extrinsics_len(), 0); + + assert!(Executive::apply_extrinsic(xt.clone()).unwrap().is_ok()); + assert!(Executive::apply_extrinsic(x1.clone()).unwrap().is_ok()); + assert!(Executive::apply_extrinsic(x2.clone()).unwrap().is_ok()); + + let extrinsic_weight = transfer_weight + + ::BlockWeights::get() + 
.get(DispatchClass::Normal) + .base_extrinsic; + assert_eq!( + >::block_weight().total(), + base_block_weight + 3u64 * extrinsic_weight, + ); + assert_eq!(>::all_extrinsics_len(), 3 * len); + + let _ = >::finalize(); + // All extrinsics length cleaned on `System::finalize` + assert_eq!(>::all_extrinsics_len(), 0); + + // Reset to a new block. + SystemCallbacksCalled::take(); + Executive::initialize_block(&Header::new_from_number(2)); + + // Block weight cleaned up on `System::initialize` + assert_eq!(>::block_weight().total(), base_block_weight); + }); +} + +#[test] +fn validate_unsigned() { + let valid = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::allowed_unsigned {})); + let invalid = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::unallowed_unsigned {})); + let mut t = new_test_ext(1); + + t.execute_with(|| { + assert_eq!( + Executive::validate_transaction( + TransactionSource::InBlock, + valid.clone(), + Default::default(), + ), + Ok(ValidTransaction::default()), + ); + assert_eq!( + Executive::validate_transaction( + TransactionSource::InBlock, + invalid.clone(), + Default::default(), + ), + Err(TransactionValidityError::Unknown(UnknownTransaction::NoUnsignedValidator)), + ); + // Need to initialize the block before applying extrinsics for the `MockedSystemCallbacks` + // check. + Executive::initialize_block(&Header::new_from_number(1)); + assert_eq!(Executive::apply_extrinsic(valid), Ok(Err(DispatchError::BadOrigin))); + assert_eq!( + Executive::apply_extrinsic(invalid), + Err(TransactionValidityError::Unknown(UnknownTransaction::NoUnsignedValidator)) + ); + }); +} + +#[test] +fn can_not_pay_for_tx_fee_on_full_lock() { + let mut t = new_test_ext(1); + t.execute_with(|| { + as fungible::MutateFreeze>::set_freeze(&(), &1, 110) + .unwrap(); + let xt = UncheckedXt::new_signed( + RuntimeCall::System(frame_system::Call::remark { remark: vec![1u8] }), + 1, + 1.into(), + tx_ext(0, 0), + ); + Executive::initialize_block(&Header::new_from_number(1)); + + assert_eq!(Executive::apply_extrinsic(xt), Err(InvalidTransaction::Payment.into()),); + assert_eq!(>::total_balance(&1), 111); + }); +} + +#[test] +fn block_hooks_weight_is_stored() { + new_test_ext(1).execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + Executive::finalize_block(); + // NOTE: might need updates over time if new weights are introduced. + // For now it only accounts for the base block execution weight and + // the `on_initialize` weight defined in the custom test module. 
+		assert_eq!(
+			<frame_system::Pallet<Runtime>>::block_weight().total(),
+			Weight::from_parts(175 + 175 + 10, 0)
+		);
+	})
+}
+
+#[test]
+fn runtime_upgraded_should_work() {
+	new_test_ext(1).execute_with(|| {
+		RuntimeVersionTestValues::mutate(|v| *v = Default::default());
+		// It should be added at genesis
+		assert!(LastRuntimeUpgrade::<Runtime>::exists());
+		assert!(!Executive::runtime_upgraded());
+
+		RuntimeVersionTestValues::mutate(|v| {
+			*v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() }
+		});
+		assert!(Executive::runtime_upgraded());
+
+		RuntimeVersionTestValues::mutate(|v| {
+			*v = sp_version::RuntimeVersion {
+				spec_version: 1,
+				spec_name: "test".into(),
+				..Default::default()
+			}
+		});
+		assert!(Executive::runtime_upgraded());
+
+		RuntimeVersionTestValues::mutate(|v| {
+			*v = sp_version::RuntimeVersion {
+				spec_version: 0,
+				impl_version: 2,
+				..Default::default()
+			}
+		});
+		assert!(!Executive::runtime_upgraded());
+
+		LastRuntimeUpgrade::<Runtime>::take();
+		assert!(Executive::runtime_upgraded());
+	})
+}
+
+#[test]
+fn last_runtime_upgrade_was_upgraded_works() {
+	let test_data = vec![
+		(0, "", 1, "", true),
+		(1, "", 1, "", false),
+		(1, "", 1, "test", true),
+		(1, "", 0, "", false),
+		(1, "", 0, "test", true),
+	];
+
+	for (spec_version, spec_name, c_spec_version, c_spec_name, result) in test_data {
+		let current = sp_version::RuntimeVersion {
+			spec_version: c_spec_version,
+			spec_name: c_spec_name.into(),
+			..Default::default()
+		};
+
+		let last = LastRuntimeUpgradeInfo {
+			spec_version: spec_version.into(),
+			spec_name: spec_name.into(),
+		};
+
+		assert_eq!(result, last.was_upgraded(&current));
+	}
+}
+
+#[test]
+fn custom_runtime_upgrade_is_called_before_modules() {
+	new_test_ext(1).execute_with(|| {
+		// Make sure `on_runtime_upgrade` is called.
+		RuntimeVersionTestValues::mutate(|v| {
+			*v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() }
+		});
+
+		Executive::initialize_block(&Header::new_from_number(1));
+
+		assert_eq!(&sp_io::storage::get(TEST_KEY).unwrap()[..], *b"module");
+		assert_eq!(sp_io::storage::get(CUSTOM_ON_RUNTIME_KEY).unwrap(), true.encode());
+		assert_eq!(
+			Some(RuntimeVersionTestValues::get().into()),
+			LastRuntimeUpgrade::<Runtime>::get(),
+		)
+	});
+}
+
+#[test]
+fn event_from_runtime_upgrade_is_included() {
+	new_test_ext(1).execute_with(|| {
+		// Make sure `on_runtime_upgrade` is called.
+		RuntimeVersionTestValues::mutate(|v| {
+			*v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() }
+		});
+
+		// set block number to non zero so events are not excluded
+		System::set_block_number(1);
+
+		Executive::initialize_block(&Header::new_from_number(2));
+		System::assert_last_event(frame_system::Event::<Runtime>::CodeUpdated.into());
+	});
+}
+
+/// Regression test that ensures that the custom on runtime upgrade is called when executive is
+/// used through the `ExecuteBlock` trait.
+#[test]
+fn custom_runtime_upgrade_is_called_when_using_execute_block_trait() {
+	let xt = UncheckedXt::new_signed(
+		RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }),
+		1,
+		1.into(),
+		tx_ext(0, 0),
+	);
+
+	let header = new_test_ext(1).execute_with(|| {
+		// Make sure `on_runtime_upgrade` is called.
+		RuntimeVersionTestValues::mutate(|v| {
+			*v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() }
+		});
+
+		// Let's build some fake block.
+		Executive::initialize_block(&Header::new_from_number(1));
+
+		Executive::apply_extrinsic(xt.clone()).unwrap().unwrap();
+
+		Executive::finalize_block()
+	});
+
+	// Reset to get the correct new genesis below.
+	RuntimeVersionTestValues::mutate(|v| {
+		*v = sp_version::RuntimeVersion { spec_version: 0, ..Default::default() }
+	});
+
+	new_test_ext(1).execute_with(|| {
+		// Make sure `on_runtime_upgrade` is called.
+		RuntimeVersionTestValues::mutate(|v| {
+			*v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() }
+		});
+
+		<Executive as ExecuteBlock<Block<UncheckedXt>>>::execute_block(Block::new(
+			header,
+			vec![xt],
+		));
+
+		assert_eq!(&sp_io::storage::get(TEST_KEY).unwrap()[..], *b"module");
+		assert_eq!(sp_io::storage::get(CUSTOM_ON_RUNTIME_KEY).unwrap(), true.encode());
+	});
+}
+
+#[test]
+fn all_weights_are_recorded_correctly() {
+	// Reset to get the correct new genesis below.
+	RuntimeVersionTestValues::take();
+
+	new_test_ext(1).execute_with(|| {
+		// Make sure `on_runtime_upgrade` is called for maximum complexity
+		RuntimeVersionTestValues::mutate(|v| {
+			*v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() }
+		});
+
+		let block_number = 1;
+
+		Executive::initialize_block(&Header::new_from_number(block_number));
+
+		// Reset the last runtime upgrade info, to make the second call to `on_runtime_upgrade`
+		// succeed.
+		LastRuntimeUpgrade::<Runtime>::take();
+		MockedSystemCallbacks::reset();
+
+		// All weights that show up in the `initialize_block_impl`
+		let custom_runtime_upgrade_weight = CustomOnRuntimeUpgrade::on_runtime_upgrade();
+		let runtime_upgrade_weight =
+			<AllPalletsWithSystem as OnRuntimeUpgrade>::on_runtime_upgrade();
+		let on_initialize_weight =
+			<AllPalletsWithSystem as OnInitialize<u64>>::on_initialize(block_number);
+		let base_block_weight = <Runtime as frame_system::Config>::BlockWeights::get().base_block;
+
+		// Weights are recorded correctly
+		assert_eq!(
+			frame_system::Pallet::<Runtime>::block_weight().total(),
+			custom_runtime_upgrade_weight +
+				runtime_upgrade_weight +
+				on_initialize_weight +
+				base_block_weight,
+		);
+	});
+}
+
+#[test]
+fn offchain_worker_works_as_expected() {
+	new_test_ext(1).execute_with(|| {
+		let parent_hash = sp_core::H256::from([69u8; 32]);
+		let mut digest = Digest::default();
+		digest.push(DigestItem::Seal([1, 2, 3, 4], vec![5, 6, 7, 8]));
+
+		let header = Header::new(1, H256::default(), H256::default(), parent_hash, digest.clone());
+
+		Executive::offchain_worker(&header);
+
+		assert_eq!(digest, System::digest());
+		assert_eq!(parent_hash, System::block_hash(0));
+		assert_eq!(header.hash(), System::block_hash(1));
+	});
+}
+
+#[test]
+fn calculating_storage_root_twice_works() {
+	let call = RuntimeCall::Custom(custom::Call::calculate_storage_root {});
+	let xt = UncheckedXt::new_signed(call, 1, 1.into(), tx_ext(0, 0));
+
+	let header = new_test_ext(1).execute_with(|| {
+		// Let's build some fake block.
+		Executive::initialize_block(&Header::new_from_number(1));
+
+		Executive::apply_extrinsic(xt.clone()).unwrap().unwrap();
+
+		Executive::finalize_block()
+	});
+
+	new_test_ext(1).execute_with(|| {
+		Executive::execute_block(Block::new(header, vec![xt]));
+	});
+}
+
+#[test]
+#[should_panic(expected = "Invalid inherent position for extrinsic at index 1")]
+fn invalid_inherent_position_fail() {
+	let xt1 = UncheckedXt::new_signed(
+		RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }),
+		1,
+		1.into(),
+		tx_ext(0, 0),
+	);
+	let xt2 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {}));
+
+	let header = new_test_ext(1).execute_with(|| {
+		// Let's build some fake block.
+ Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block::new(header, vec![xt1, xt2])); + }); +} + +#[test] +fn valid_inherents_position_works() { + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); + let xt2 = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); + + let header = new_test_ext(1).execute_with(|| { + // Let's build some fake block. + Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block::new(header, vec![xt1, xt2])); + }); +} + +#[test] +#[should_panic(expected = "A call was labelled as mandatory, but resulted in an Error.")] +fn invalid_inherents_fail_block_execution() { + let xt1 = UncheckedXt::new_signed( + RuntimeCall::Custom(custom::Call::inherent {}), + 1, + 1.into(), + tx_ext(0, 0), + ); + + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block::new( + Header::new(1, H256::default(), H256::default(), [69u8; 32].into(), Digest::default()), + vec![xt1], + )); + }); +} + +// Inherents are created by the runtime and don't need to be validated. +#[test] +fn inherents_fail_validate_block() { + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); + + new_test_ext(1).execute_with(|| { + assert_eq!( + Executive::validate_transaction(TransactionSource::External, xt1, H256::random()) + .unwrap_err(), + InvalidTransaction::MandatoryValidation.into() + ); + }) +} + +/// Inherents still work while `initialize_block` forbids transactions. +#[test] +fn inherents_ok_while_exts_forbidden_works() { + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); + + let header = new_test_ext(1).execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + // This is not applied: + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + // Tell `initialize_block` to forbid extrinsics: + Executive::execute_block(Block::new(header, vec![xt1])); + }); +} + +/// Refuses to import blocks with transactions during `OnlyInherents` mode. +#[test] +#[should_panic = "Only inherents are allowed in this block"] +fn transactions_in_only_inherents_block_errors() { + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); + let xt2 = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); + + let header = new_test_ext(1).execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + MbmActive::set(true); + Executive::execute_block(Block::new(header, vec![xt1, xt2])); + }); +} + +/// Same as above but no error. 
+#[test] +fn transactions_in_normal_block_works() { + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); + let xt2 = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); + + let header = new_test_ext(1).execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + // Tell `initialize_block` to forbid extrinsics: + Executive::execute_block(Block::new(header, vec![xt1, xt2])); + }); +} + +#[test] +#[cfg(feature = "try-runtime")] +fn try_execute_block_works() { + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); + let xt2 = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); + + let header = new_test_ext(1).execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + Executive::try_execute_block( + Block::new(header, vec![xt1, xt2]), + true, + true, + frame_try_runtime::TryStateSelect::All, + ) + .unwrap(); + }); +} + +/// Same as `extrinsic_while_exts_forbidden_errors` but using the try-runtime function. +#[test] +#[cfg(feature = "try-runtime")] +#[should_panic = "Only inherents allowed"] +fn try_execute_tx_forbidden_errors() { + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); + let xt2 = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); + + let header = new_test_ext(1).execute_with(|| { + // Let's build some fake block. + Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + MbmActive::set(true); + Executive::try_execute_block( + Block::new(header, vec![xt1, xt2]), + true, + true, + frame_try_runtime::TryStateSelect::All, + ) + .unwrap(); + }); +} + +/// Check that `ensure_inherents_are_first` reports the correct indices. 
+#[test] +fn ensure_inherents_are_first_works() { + let in1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); + let in2 = UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::inherent {})); + let xt2 = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); + + // Mocked empty header: + let header = new_test_ext(1).execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + assert_ok!(Runtime::ensure_inherents_are_first(&Block::new(header.clone(), vec![]),), 0); + assert_ok!( + Runtime::ensure_inherents_are_first(&Block::new(header.clone(), vec![xt2.clone()]),), + 0 + ); + assert_ok!( + Runtime::ensure_inherents_are_first(&Block::new(header.clone(), vec![in1.clone()])), + 1 + ); + assert_ok!( + Runtime::ensure_inherents_are_first(&Block::new( + header.clone(), + vec![in1.clone(), xt2.clone()] + ),), + 1 + ); + assert_ok!( + Runtime::ensure_inherents_are_first(&Block::new( + header.clone(), + vec![in2.clone(), in1.clone(), xt2.clone()] + ),), + 2 + ); + + assert_eq!( + Runtime::ensure_inherents_are_first(&Block::new( + header.clone(), + vec![xt2.clone(), in1.clone()] + ),), + Err(1) + ); + assert_eq!( + Runtime::ensure_inherents_are_first(&Block::new( + header.clone(), + vec![xt2.clone(), xt2.clone(), in1.clone()] + ),), + Err(2) + ); + assert_eq!( + Runtime::ensure_inherents_are_first(&Block::new( + header.clone(), + vec![xt2.clone(), xt2.clone(), xt2.clone(), in2.clone()] + ),), + Err(3) + ); + }); +} + +/// Check that block execution rejects blocks with transactions in them while MBMs are active and +/// also that all the system callbacks are called correctly. +#[test] +fn callbacks_in_block_execution_works() { + callbacks_in_block_execution_works_inner(false); + callbacks_in_block_execution_works_inner(true); +} + +fn callbacks_in_block_execution_works_inner(mbms_active: bool) { + MbmActive::set(mbms_active); + + for (n_in, n_tx) in (0..10usize).zip(0..10usize) { + let mut extrinsics = Vec::new(); + + let header = new_test_ext(10).execute_with(|| { + MockedSystemCallbacks::reset(); + Executive::initialize_block(&Header::new_from_number(1)); + assert_eq!(SystemCallbacksCalled::get(), 1); + + for i in 0..n_in { + let xt = if i % 2 == 0 { + UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})) + } else { + UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::optional_inherent {})) + }; + Executive::apply_extrinsic(xt.clone()).unwrap().unwrap(); + extrinsics.push(xt); + } + + for t in 0..n_tx { + let xt = UncheckedXt::new_signed( + RuntimeCall::Custom2(custom2::Call::some_call {}), + 1, + 1.into(), + tx_ext(t as u64, 0), + ); + // Extrinsics can be applied even when MBMs are active. Only the `execute_block` + // will reject it. 
+ Executive::apply_extrinsic(xt.clone()).unwrap().unwrap(); + extrinsics.push(xt); + } + + Executive::finalize_block() + }); + assert_eq!(SystemCallbacksCalled::get(), 3); + + new_test_ext(10).execute_with(|| { + let header = std::panic::catch_unwind(|| { + Executive::execute_block(Block::new(header, extrinsics)); + }); + + match header { + Err(e) => { + let err = e.downcast::<&str>().unwrap(); + assert_eq!(*err, "Only inherents are allowed in this block"); + assert!( + MbmActive::get() && n_tx > 0, + "Transactions should be rejected when MBMs are active" + ); + }, + Ok(_) => { + assert_eq!(SystemCallbacksCalled::get(), 3); + assert!( + !MbmActive::get() || n_tx == 0, + "MBMs should be deactivated after finalization" + ); + }, + } + }); + } +} + +#[test] +fn post_inherent_called_after_all_inherents() { + let in1 = UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::inherent {})); + let xt1 = UncheckedXt::new_signed( + RuntimeCall::Custom2(custom2::Call::some_call {}), + 1, + 1.into(), + tx_ext(0, 0), + ); + + let header = new_test_ext(1).execute_with(|| { + // Let's build some fake block. + Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(in1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + #[cfg(feature = "try-runtime")] + new_test_ext(1).execute_with(|| { + Executive::try_execute_block( + Block::new(header.clone(), vec![in1.clone(), xt1.clone()]), + true, + true, + frame_try_runtime::TryStateSelect::All, + ) + .unwrap(); + assert!(MockedSystemCallbacks::post_transactions_called()); + }); + + new_test_ext(1).execute_with(|| { + MockedSystemCallbacks::reset(); + Executive::execute_block(Block::new(header, vec![in1, xt1])); + assert!(MockedSystemCallbacks::post_transactions_called()); + }); +} + +/// Regression test for AppSec finding #40. +#[test] +fn post_inherent_called_after_all_optional_inherents() { + let in1 = UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::optional_inherent {})); + let xt1 = UncheckedXt::new_signed( + RuntimeCall::Custom2(custom2::Call::some_call {}), + 1, + 1.into(), + tx_ext(0, 0), + ); + + let header = new_test_ext(1).execute_with(|| { + // Let's build some fake block. 
+ Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(in1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + #[cfg(feature = "try-runtime")] + new_test_ext(1).execute_with(|| { + Executive::try_execute_block( + Block::new(header.clone(), vec![in1.clone(), xt1.clone()]), + true, + true, + frame_try_runtime::TryStateSelect::All, + ) + .unwrap(); + assert!(MockedSystemCallbacks::post_transactions_called()); + }); + + new_test_ext(1).execute_with(|| { + MockedSystemCallbacks::reset(); + Executive::execute_block(Block::new(header, vec![in1, xt1])); + assert!(MockedSystemCallbacks::post_transactions_called()); + }); +} + +#[test] +fn is_inherent_works() { + let ext = UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::inherent {})); + assert!(Runtime::is_inherent(&ext)); + let ext = UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::optional_inherent {})); + assert!(Runtime::is_inherent(&ext)); + + let ext = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); + assert!(!Runtime::is_inherent(&ext)); + + let ext = UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::allowed_unsigned {})); + assert!(!Runtime::is_inherent(&ext), "Unsigned ext are not automatically inherents"); +} diff --git a/substrate/frame/fast-unstake/src/migrations.rs b/substrate/frame/fast-unstake/src/migrations.rs index 564388407045e0ae86b87c7823f3f07c5613e012..97ad86bfff42bf6ee1e258f5663d9bb53144373c 100644 --- a/substrate/frame/fast-unstake/src/migrations.rs +++ b/substrate/frame/fast-unstake/src/migrations.rs @@ -33,12 +33,12 @@ pub mod v1 { pub struct MigrateToV1(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { - let current = Pallet::::current_storage_version(); + let current = Pallet::::in_code_storage_version(); let onchain = Pallet::::on_chain_storage_version(); log!( info, - "Running migration with current storage version {:?} / onchain {:?}", + "Running migration with in-code storage version {:?} / onchain {:?}", current, onchain ); diff --git a/substrate/frame/fast-unstake/src/weights.rs b/substrate/frame/fast-unstake/src/weights.rs index 9c25a409f74099fc4d13158f99fae9036b34eff8..d783ba921bf9c11fcf1475d25ad87c6ca600ebf1 100644 --- a/substrate/frame/fast-unstake/src/weights.rs +++ b/substrate/frame/fast-unstake/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_fast_unstake +//! Autogenerated weights for `pallet_fast_unstake` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/fast-unstake/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/fast-unstake/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_fast_unstake. +/// Weight functions needed for `pallet_fast_unstake`. pub trait WeightInfo { fn on_idle_unstake(b: u32, ) -> Weight; fn on_idle_check(v: u32, b: u32, ) -> Weight; @@ -59,301 +58,305 @@ pub trait WeightInfo { fn control() -> Weight; } -/// Weights for pallet_fast_unstake using the Substrate node and recommended hardware. +/// Weights for `pallet_fast_unstake` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ValidatorCount (r:1 w:0) - /// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: FastUnstake Head (r:1 w:1) - /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(5768), added: 6263, mode: MaxEncodedLen) - /// Storage: FastUnstake CounterForQueue (r:1 w:0) - /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking SlashingSpans (r:64 w:0) - /// Proof Skipped: Staking SlashingSpans (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking Bonded (r:64 w:64) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:64 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:64 w:0) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: System Account (r:64 w:64) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:64 w:64) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:64 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:0 w:64) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Payee 
(r:0 w:64) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:1 w:0) + /// Proof: `FastUnstake::ErasToCheckPerBlock` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ValidatorCount` (r:1 w:0) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::Head` (r:1 w:1) + /// Proof: `FastUnstake::Head` (`max_values`: Some(1), `max_size`: Some(5768), added: 6263, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::CounterForQueue` (r:1 w:0) + /// Proof: `FastUnstake::CounterForQueue` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::SlashingSpans` (r:64 w:0) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:64 w:64) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:64 w:64) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:64 w:64) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:64 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:64 w:64) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:64 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:64 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:64) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) /// The range of component `b` is `[1, 64]`. fn on_idle_unstake(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1378 + b * (343 ±0)` + // Measured: `1475 + b * (452 ±0)` // Estimated: `7253 + b * (3774 ±0)` - // Minimum execution time: 92_847_000 picoseconds. - Weight::from_parts(42_300_813, 7253) - // Standard Error: 40_514 - .saturating_add(Weight::from_parts(58_412_402, 0).saturating_mul(b.into())) + // Minimum execution time: 89_005_000 picoseconds. 
+ Weight::from_parts(50_257_055, 7253) + // Standard Error: 68_836 + .saturating_add(Weight::from_parts(57_329_950, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().reads((7_u64).saturating_mul(b.into()))) + .saturating_add(T::DbWeight::get().reads((8_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((5_u64).saturating_mul(b.into()))) .saturating_add(Weight::from_parts(0, 3774).saturating_mul(b.into())) } - /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ValidatorCount (r:1 w:0) - /// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: FastUnstake Head (r:1 w:1) - /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(5768), added: 6263, mode: MaxEncodedLen) - /// Storage: FastUnstake CounterForQueue (r:1 w:0) - /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ErasStakers (r:257 w:0) - /// Proof Skipped: Staking ErasStakers (max_values: None, max_size: None, mode: Measured) + /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:1 w:0) + /// Proof: `FastUnstake::ErasToCheckPerBlock` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ValidatorCount` (r:1 w:0) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::Head` (r:1 w:1) + /// Proof: `FastUnstake::Head` (`max_values`: Some(1), `max_size`: Some(5768), added: 6263, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::CounterForQueue` (r:1 w:0) + /// Proof: `FastUnstake::CounterForQueue` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakers` (r:1 w:0) + /// Proof: `Staking::ErasStakers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasStakersPaged` (r:257 w:0) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `v` is `[1, 256]`. /// The range of component `b` is `[1, 64]`. fn on_idle_check(v: u32, b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1546 + b * (48 ±0) + v * (10037 ±0)` - // Estimated: `7253 + b * (49 ±0) + v * (12513 ±0)` - // Minimum execution time: 1_685_784_000 picoseconds. 
- Weight::from_parts(1_693_370_000, 7253) - // Standard Error: 13_295_842 - .saturating_add(Weight::from_parts(425_349_148, 0).saturating_mul(v.into())) - // Standard Error: 53_198_180 - .saturating_add(Weight::from_parts(1_673_328_444, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(7_u64)) + // Measured: `1879 + b * (55 ±0) + v * (10055 ±0)` + // Estimated: `7253 + b * (56 ±0) + v * (12531 ±0)` + // Minimum execution time: 1_737_131_000 picoseconds. + Weight::from_parts(1_746_770_000, 7253) + // Standard Error: 13_401_143 + .saturating_add(Weight::from_parts(426_946_450, 0).saturating_mul(v.into())) + // Standard Error: 53_619_501 + .saturating_add(Weight::from_parts(1_664_681_508, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 49).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 12513).saturating_mul(v.into())) + .saturating_add(Weight::from_parts(0, 56).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 12531).saturating_mul(v.into())) } - /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: FastUnstake Queue (r:1 w:1) - /// Proof: FastUnstake Queue (max_values: None, max_size: Some(56), added: 2531, mode: MaxEncodedLen) - /// Storage: FastUnstake Head (r:1 w:0) - /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(5768), added: 6263, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:1 w:1) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: FastUnstake CounterForQueue (r:1 w:1) - /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: 
`FastUnstake::ErasToCheckPerBlock` (r:1 w:0) + /// Proof: `FastUnstake::ErasToCheckPerBlock` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::Queue` (r:1 w:1) + /// Proof: `FastUnstake::Queue` (`max_values`: None, `max_size`: Some(56), added: 2531, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::Head` (r:1 w:0) + /// Proof: `FastUnstake::Head` (`max_values`: Some(1), `max_size`: Some(5768), added: 6263, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:1 w:1) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::CounterForQueue` (r:1 w:1) + /// Proof: `FastUnstake::CounterForQueue` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn register_fast_unstake() -> Weight { // Proof Size summary in bytes: - // Measured: `1964` + // Measured: `1955` // Estimated: `7253` - // Minimum execution time: 125_512_000 picoseconds. - Weight::from_parts(129_562_000, 7253) + // Minimum execution time: 112_632_000 picoseconds. 
+ Weight::from_parts(117_267_000, 7253) .saturating_add(T::DbWeight::get().reads(15_u64)) .saturating_add(T::DbWeight::get().writes(9_u64)) } - /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: FastUnstake Queue (r:1 w:1) - /// Proof: FastUnstake Queue (max_values: None, max_size: Some(56), added: 2531, mode: MaxEncodedLen) - /// Storage: FastUnstake Head (r:1 w:0) - /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(5768), added: 6263, mode: MaxEncodedLen) - /// Storage: FastUnstake CounterForQueue (r:1 w:1) - /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:1 w:0) + /// Proof: `FastUnstake::ErasToCheckPerBlock` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::Queue` (r:1 w:1) + /// Proof: `FastUnstake::Queue` (`max_values`: None, `max_size`: Some(56), added: 2531, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::Head` (r:1 w:0) + /// Proof: `FastUnstake::Head` (`max_values`: Some(1), `max_size`: Some(5768), added: 6263, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::CounterForQueue` (r:1 w:1) + /// Proof: `FastUnstake::CounterForQueue` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn deregister() -> Weight { // Proof Size summary in bytes: - // Measured: `1223` + // Measured: `1251` // Estimated: `7253` - // Minimum execution time: 43_943_000 picoseconds. - Weight::from_parts(45_842_000, 7253) + // Minimum execution time: 39_253_000 picoseconds. + Weight::from_parts(40_053_000, 7253) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: FastUnstake ErasToCheckPerBlock (r:0 w:1) - /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:0 w:1) + /// Proof: `FastUnstake::ErasToCheckPerBlock` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn control() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_677_000 picoseconds. - Weight::from_parts(2_849_000, 0) + // Minimum execution time: 2_386_000 picoseconds. + Weight::from_parts(2_508_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
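The regenerated `on_idle_unstake` figures above keep the usual shape: a flat base weight plus per-item terms scaled by the batch size `b`, with database costs charged through `T::DbWeight` (or `RocksDbWeight` in the fallback implementation that follows). Below is a minimal sketch (not part of the generated file) of how that formula evaluates, using the new constants from this diff and assuming a crate that already depends on `frame_support`; the helper name is illustrative.

```rust
// Sketch: evaluate the regenerated `on_idle_unstake` weight for a batch of `b`
// members, mirroring the RocksDbWeight-based fallback implementation below.
use frame_support::weights::{constants::RocksDbWeight, Weight};

fn on_idle_unstake_weight(b: u32) -> Weight {
    // Flat base: ref-time in picoseconds, proof size in bytes.
    Weight::from_parts(50_257_055, 7253)
        // Per-item ref-time slope from the benchmark.
        .saturating_add(Weight::from_parts(57_329_950, 0).saturating_mul(b.into()))
        // 6 fixed reads and 1 fixed write, plus 8 reads and 5 writes per batch item.
        .saturating_add(RocksDbWeight::get().reads(6))
        .saturating_add(RocksDbWeight::get().reads((8_u64).saturating_mul(b.into())))
        .saturating_add(RocksDbWeight::get().writes(1))
        .saturating_add(RocksDbWeight::get().writes((5_u64).saturating_mul(b.into())))
        // Per-item proof-size growth (3774 bytes per item, per the storage comments).
        .saturating_add(Weight::from_parts(0, 3774).saturating_mul(b.into()))
}

fn main() {
    let w = on_idle_unstake_weight(64);
    println!("ref_time: {} ps, proof_size: {} bytes", w.ref_time(), w.proof_size());
}
```

For larger batches the per-item read/write charges dominate the flat base, which is why the per-`b` reads going from 7 to 8 matters more than the shifts in the base constants.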
impl WeightInfo for () { - /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ValidatorCount (r:1 w:0) - /// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: FastUnstake Head (r:1 w:1) - /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(5768), added: 6263, mode: MaxEncodedLen) - /// Storage: FastUnstake CounterForQueue (r:1 w:0) - /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking SlashingSpans (r:64 w:0) - /// Proof Skipped: Staking SlashingSpans (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking Bonded (r:64 w:64) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:64 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:64 w:0) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: System Account (r:64 w:64) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:64 w:64) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:64 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:0 w:64) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:64) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:1 w:0) + /// Proof: `FastUnstake::ErasToCheckPerBlock` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ValidatorCount` (r:1 w:0) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::Head` (r:1 w:1) + /// Proof: `FastUnstake::Head` (`max_values`: Some(1), `max_size`: Some(5768), added: 6263, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::CounterForQueue` (r:1 w:0) + /// Proof: `FastUnstake::CounterForQueue` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::SlashingSpans` (r:64 w:0) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:64 w:64) + /// 
Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:64 w:64) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:64 w:64) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:64 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:64 w:64) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:64 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:64 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:64) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) /// The range of component `b` is `[1, 64]`. fn on_idle_unstake(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1378 + b * (343 ±0)` + // Measured: `1475 + b * (452 ±0)` // Estimated: `7253 + b * (3774 ±0)` - // Minimum execution time: 92_847_000 picoseconds. - Weight::from_parts(42_300_813, 7253) - // Standard Error: 40_514 - .saturating_add(Weight::from_parts(58_412_402, 0).saturating_mul(b.into())) + // Minimum execution time: 89_005_000 picoseconds. + Weight::from_parts(50_257_055, 7253) + // Standard Error: 68_836 + .saturating_add(Weight::from_parts(57_329_950, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().reads((7_u64).saturating_mul(b.into()))) + .saturating_add(RocksDbWeight::get().reads((8_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((5_u64).saturating_mul(b.into()))) .saturating_add(Weight::from_parts(0, 3774).saturating_mul(b.into())) } - /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ValidatorCount (r:1 w:0) - /// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: FastUnstake Head (r:1 w:1) - /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(5768), added: 6263, mode: MaxEncodedLen) - /// Storage: FastUnstake CounterForQueue (r:1 w:0) - /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ErasStakers (r:257 w:0) - /// Proof Skipped: Staking ErasStakers (max_values: None, max_size: None, mode: Measured) + /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:1 w:0) + /// Proof: `FastUnstake::ErasToCheckPerBlock` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: 
`MaxEncodedLen`) + /// Storage: `Staking::ValidatorCount` (r:1 w:0) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::Head` (r:1 w:1) + /// Proof: `FastUnstake::Head` (`max_values`: Some(1), `max_size`: Some(5768), added: 6263, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::CounterForQueue` (r:1 w:0) + /// Proof: `FastUnstake::CounterForQueue` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakers` (r:1 w:0) + /// Proof: `Staking::ErasStakers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasStakersPaged` (r:257 w:0) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `v` is `[1, 256]`. /// The range of component `b` is `[1, 64]`. fn on_idle_check(v: u32, b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1546 + b * (48 ±0) + v * (10037 ±0)` - // Estimated: `7253 + b * (49 ±0) + v * (12513 ±0)` - // Minimum execution time: 1_685_784_000 picoseconds. - Weight::from_parts(1_693_370_000, 7253) - // Standard Error: 13_295_842 - .saturating_add(Weight::from_parts(425_349_148, 0).saturating_mul(v.into())) - // Standard Error: 53_198_180 - .saturating_add(Weight::from_parts(1_673_328_444, 0).saturating_mul(b.into())) - .saturating_add(RocksDbWeight::get().reads(7_u64)) + // Measured: `1879 + b * (55 ±0) + v * (10055 ±0)` + // Estimated: `7253 + b * (56 ±0) + v * (12531 ±0)` + // Minimum execution time: 1_737_131_000 picoseconds. 
+ Weight::from_parts(1_746_770_000, 7253) + // Standard Error: 13_401_143 + .saturating_add(Weight::from_parts(426_946_450, 0).saturating_mul(v.into())) + // Standard Error: 53_619_501 + .saturating_add(Weight::from_parts(1_664_681_508, 0).saturating_mul(b.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 49).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 12513).saturating_mul(v.into())) + .saturating_add(Weight::from_parts(0, 56).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 12531).saturating_mul(v.into())) } - /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: FastUnstake Queue (r:1 w:1) - /// Proof: FastUnstake Queue (max_values: None, max_size: Some(56), added: 2531, mode: MaxEncodedLen) - /// Storage: FastUnstake Head (r:1 w:0) - /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(5768), added: 6263, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:1 w:1) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: FastUnstake CounterForQueue (r:1 w:1) - /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:1 w:0) + /// Proof: `FastUnstake::ErasToCheckPerBlock` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::Queue` (r:1 w:1) + /// Proof: `FastUnstake::Queue` (`max_values`: None, `max_size`: Some(56), added: 2531, mode: `MaxEncodedLen`) + /// 
Storage: `FastUnstake::Head` (r:1 w:0) + /// Proof: `FastUnstake::Head` (`max_values`: Some(1), `max_size`: Some(5768), added: 6263, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:1 w:1) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::CounterForQueue` (r:1 w:1) + /// Proof: `FastUnstake::CounterForQueue` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn register_fast_unstake() -> Weight { // Proof Size summary in bytes: - // Measured: `1964` + // Measured: `1955` // Estimated: `7253` - // Minimum execution time: 125_512_000 picoseconds. - Weight::from_parts(129_562_000, 7253) + // Minimum execution time: 112_632_000 picoseconds. 
+ Weight::from_parts(117_267_000, 7253) .saturating_add(RocksDbWeight::get().reads(15_u64)) .saturating_add(RocksDbWeight::get().writes(9_u64)) } - /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: FastUnstake Queue (r:1 w:1) - /// Proof: FastUnstake Queue (max_values: None, max_size: Some(56), added: 2531, mode: MaxEncodedLen) - /// Storage: FastUnstake Head (r:1 w:0) - /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(5768), added: 6263, mode: MaxEncodedLen) - /// Storage: FastUnstake CounterForQueue (r:1 w:1) - /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:1 w:0) + /// Proof: `FastUnstake::ErasToCheckPerBlock` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::Queue` (r:1 w:1) + /// Proof: `FastUnstake::Queue` (`max_values`: None, `max_size`: Some(56), added: 2531, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::Head` (r:1 w:0) + /// Proof: `FastUnstake::Head` (`max_values`: Some(1), `max_size`: Some(5768), added: 6263, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::CounterForQueue` (r:1 w:1) + /// Proof: `FastUnstake::CounterForQueue` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn deregister() -> Weight { // Proof Size summary in bytes: - // Measured: `1223` + // Measured: `1251` // Estimated: `7253` - // Minimum execution time: 43_943_000 picoseconds. - Weight::from_parts(45_842_000, 7253) + // Minimum execution time: 39_253_000 picoseconds. + Weight::from_parts(40_053_000, 7253) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: FastUnstake ErasToCheckPerBlock (r:0 w:1) - /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:0 w:1) + /// Proof: `FastUnstake::ErasToCheckPerBlock` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn control() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_677_000 picoseconds. - Weight::from_parts(2_849_000, 0) + // Minimum execution time: 2_386_000 picoseconds. + Weight::from_parts(2_508_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/frame/glutton/src/weights.rs b/substrate/frame/glutton/src/weights.rs index cbc0fb022f510889e66a403f1624c025faa5a7f8..b2e28dc488a6f24dc6da4690ff4939129ea9d2f3 100644 --- a/substrate/frame/glutton/src/weights.rs +++ b/substrate/frame/glutton/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_glutton +//! Autogenerated weights for `pallet_glutton` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/glutton/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/glutton/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_glutton. +/// Weight functions needed for `pallet_glutton`. pub trait WeightInfo { fn initialize_pallet_grow(n: u32, ) -> Weight; fn initialize_pallet_shrink(n: u32, ) -> Weight; @@ -63,39 +62,39 @@ pub trait WeightInfo { fn set_storage() -> Weight; } -/// Weights for pallet_glutton using the Substrate node and recommended hardware. +/// Weights for `pallet_glutton` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Glutton TrashDataCount (r:1 w:1) - /// Proof: Glutton TrashDataCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Glutton TrashData (r:0 w:1000) - /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) + /// Storage: `Glutton::TrashDataCount` (r:1 w:1) + /// Proof: `Glutton::TrashDataCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:0 w:1000) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. fn initialize_pallet_grow(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `86` // Estimated: `1489` - // Minimum execution time: 11_488_000 picoseconds. - Weight::from_parts(93_073_710, 1489) - // Standard Error: 22_390 - .saturating_add(Weight::from_parts(9_572_012, 0).saturating_mul(n.into())) + // Minimum execution time: 8_443_000 picoseconds. 
+ Weight::from_parts(103_698_651, 1489) + // Standard Error: 21_777 + .saturating_add(Weight::from_parts(9_529_476, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) } - /// Storage: Glutton TrashDataCount (r:1 w:1) - /// Proof: Glutton TrashDataCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Glutton TrashData (r:0 w:1000) - /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) + /// Storage: `Glutton::TrashDataCount` (r:1 w:1) + /// Proof: `Glutton::TrashDataCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:0 w:1000) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. fn initialize_pallet_shrink(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `119` // Estimated: `1489` - // Minimum execution time: 11_378_000 picoseconds. - Weight::from_parts(5_591_508, 1489) - // Standard Error: 1_592 - .saturating_add(Weight::from_parts(1_163_758, 0).saturating_mul(n.into())) + // Minimum execution time: 8_343_000 picoseconds. + Weight::from_parts(304_498, 1489) + // Standard Error: 1_568 + .saturating_add(Weight::from_parts(1_146_553, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -105,119 +104,119 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 669_000 picoseconds. - Weight::from_parts(990_745, 0) - // Standard Error: 10 - .saturating_add(Weight::from_parts(105_224, 0).saturating_mul(i.into())) + // Minimum execution time: 656_000 picoseconds. + Weight::from_parts(1_875_128, 0) + // Standard Error: 8 + .saturating_add(Weight::from_parts(103_381, 0).saturating_mul(i.into())) } - /// Storage: Glutton TrashData (r:5000 w:0) - /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) + /// Storage: `Glutton::TrashData` (r:5000 w:0) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) /// The range of component `i` is `[0, 5000]`. fn waste_proof_size_some(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `119114 + i * (1022 ±0)` // Estimated: `990 + i * (3016 ±0)` - // Minimum execution time: 435_000 picoseconds. - Weight::from_parts(66_547_542, 990) - // Standard Error: 4_557 - .saturating_add(Weight::from_parts(5_851_324, 0).saturating_mul(i.into())) + // Minimum execution time: 454_000 picoseconds. 
+ Weight::from_parts(521_000, 990) + // Standard Error: 1_940 + .saturating_add(Weight::from_parts(5_729_831, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(Weight::from_parts(0, 3016).saturating_mul(i.into())) } - /// Storage: Glutton Storage (r:1 w:0) - /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Glutton Compute (r:1 w:0) - /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Glutton TrashData (r:1737 w:0) - /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) + /// Storage: `Glutton::Storage` (r:1 w:0) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::Compute` (r:1 w:0) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:1737 w:0) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) fn on_idle_high_proof_waste() -> Weight { // Proof Size summary in bytes: // Measured: `1900497` // Estimated: `5239782` - // Minimum execution time: 67_699_845_000 picoseconds. - Weight::from_parts(67_893_204_000, 5239782) + // Minimum execution time: 55_403_909_000 picoseconds. + Weight::from_parts(55_472_412_000, 5239782) .saturating_add(T::DbWeight::get().reads(1739_u64)) } - /// Storage: Glutton Storage (r:1 w:0) - /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Glutton Compute (r:1 w:0) - /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Glutton TrashData (r:5 w:0) - /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) + /// Storage: `Glutton::Storage` (r:1 w:0) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::Compute` (r:1 w:0) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:5 w:0) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) fn on_idle_low_proof_waste() -> Weight { // Proof Size summary in bytes: // Measured: `9547` // Estimated: `16070` - // Minimum execution time: 122_297_527_000 picoseconds. - Weight::from_parts(122_394_818_000, 16070) + // Minimum execution time: 97_959_007_000 picoseconds. 
+ Weight::from_parts(98_030_476_000, 16070) .saturating_add(T::DbWeight::get().reads(7_u64)) } - /// Storage: Glutton Storage (r:1 w:0) - /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Glutton Compute (r:1 w:0) - /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: `Glutton::Storage` (r:1 w:0) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::Compute` (r:1 w:0) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn empty_on_idle() -> Weight { // Proof Size summary in bytes: // Measured: `86` // Estimated: `1493` - // Minimum execution time: 5_882_000 picoseconds. - Weight::from_parts(6_138_000, 1493) + // Minimum execution time: 5_011_000 picoseconds. + Weight::from_parts(5_183_000, 1493) .saturating_add(T::DbWeight::get().reads(2_u64)) } - /// Storage: Glutton Compute (r:0 w:1) - /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: `Glutton::Compute` (r:0 w:1) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn set_compute() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_830_000 picoseconds. - Weight::from_parts(8_366_000, 0) + // Minimum execution time: 5_591_000 picoseconds. + Weight::from_parts(5_970_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Glutton Storage (r:0 w:1) - /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: `Glutton::Storage` (r:0 w:1) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn set_storage() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_933_000 picoseconds. - Weight::from_parts(8_213_000, 0) + // Minimum execution time: 5_689_000 picoseconds. + Weight::from_parts(6_038_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Glutton TrashDataCount (r:1 w:1) - /// Proof: Glutton TrashDataCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Glutton TrashData (r:0 w:1000) - /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) + /// Storage: `Glutton::TrashDataCount` (r:1 w:1) + /// Proof: `Glutton::TrashDataCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:0 w:1000) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. fn initialize_pallet_grow(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `86` // Estimated: `1489` - // Minimum execution time: 11_488_000 picoseconds. - Weight::from_parts(93_073_710, 1489) - // Standard Error: 22_390 - .saturating_add(Weight::from_parts(9_572_012, 0).saturating_mul(n.into())) + // Minimum execution time: 8_443_000 picoseconds. 
+ Weight::from_parts(103_698_651, 1489) + // Standard Error: 21_777 + .saturating_add(Weight::from_parts(9_529_476, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) } - /// Storage: Glutton TrashDataCount (r:1 w:1) - /// Proof: Glutton TrashDataCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Glutton TrashData (r:0 w:1000) - /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) + /// Storage: `Glutton::TrashDataCount` (r:1 w:1) + /// Proof: `Glutton::TrashDataCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:0 w:1000) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. fn initialize_pallet_shrink(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `119` // Estimated: `1489` - // Minimum execution time: 11_378_000 picoseconds. - Weight::from_parts(5_591_508, 1489) - // Standard Error: 1_592 - .saturating_add(Weight::from_parts(1_163_758, 0).saturating_mul(n.into())) + // Minimum execution time: 8_343_000 picoseconds. + Weight::from_parts(304_498, 1489) + // Standard Error: 1_568 + .saturating_add(Weight::from_parts(1_146_553, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -227,83 +226,83 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 669_000 picoseconds. - Weight::from_parts(990_745, 0) - // Standard Error: 10 - .saturating_add(Weight::from_parts(105_224, 0).saturating_mul(i.into())) + // Minimum execution time: 656_000 picoseconds. + Weight::from_parts(1_875_128, 0) + // Standard Error: 8 + .saturating_add(Weight::from_parts(103_381, 0).saturating_mul(i.into())) } - /// Storage: Glutton TrashData (r:5000 w:0) - /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) + /// Storage: `Glutton::TrashData` (r:5000 w:0) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) /// The range of component `i` is `[0, 5000]`. fn waste_proof_size_some(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `119114 + i * (1022 ±0)` // Estimated: `990 + i * (3016 ±0)` - // Minimum execution time: 435_000 picoseconds. - Weight::from_parts(66_547_542, 990) - // Standard Error: 4_557 - .saturating_add(Weight::from_parts(5_851_324, 0).saturating_mul(i.into())) + // Minimum execution time: 454_000 picoseconds. 
+ Weight::from_parts(521_000, 990) + // Standard Error: 1_940 + .saturating_add(Weight::from_parts(5_729_831, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(Weight::from_parts(0, 3016).saturating_mul(i.into())) } - /// Storage: Glutton Storage (r:1 w:0) - /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Glutton Compute (r:1 w:0) - /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Glutton TrashData (r:1737 w:0) - /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) + /// Storage: `Glutton::Storage` (r:1 w:0) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::Compute` (r:1 w:0) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:1737 w:0) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) fn on_idle_high_proof_waste() -> Weight { // Proof Size summary in bytes: // Measured: `1900497` // Estimated: `5239782` - // Minimum execution time: 67_699_845_000 picoseconds. - Weight::from_parts(67_893_204_000, 5239782) + // Minimum execution time: 55_403_909_000 picoseconds. + Weight::from_parts(55_472_412_000, 5239782) .saturating_add(RocksDbWeight::get().reads(1739_u64)) } - /// Storage: Glutton Storage (r:1 w:0) - /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Glutton Compute (r:1 w:0) - /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Glutton TrashData (r:5 w:0) - /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) + /// Storage: `Glutton::Storage` (r:1 w:0) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::Compute` (r:1 w:0) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:5 w:0) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) fn on_idle_low_proof_waste() -> Weight { // Proof Size summary in bytes: // Measured: `9547` // Estimated: `16070` - // Minimum execution time: 122_297_527_000 picoseconds. - Weight::from_parts(122_394_818_000, 16070) + // Minimum execution time: 97_959_007_000 picoseconds. 
+ Weight::from_parts(98_030_476_000, 16070) .saturating_add(RocksDbWeight::get().reads(7_u64)) } - /// Storage: Glutton Storage (r:1 w:0) - /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Glutton Compute (r:1 w:0) - /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: `Glutton::Storage` (r:1 w:0) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::Compute` (r:1 w:0) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn empty_on_idle() -> Weight { // Proof Size summary in bytes: // Measured: `86` // Estimated: `1493` - // Minimum execution time: 5_882_000 picoseconds. - Weight::from_parts(6_138_000, 1493) + // Minimum execution time: 5_011_000 picoseconds. + Weight::from_parts(5_183_000, 1493) .saturating_add(RocksDbWeight::get().reads(2_u64)) } - /// Storage: Glutton Compute (r:0 w:1) - /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: `Glutton::Compute` (r:0 w:1) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn set_compute() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_830_000 picoseconds. - Weight::from_parts(8_366_000, 0) + // Minimum execution time: 5_591_000 picoseconds. + Weight::from_parts(5_970_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Glutton Storage (r:0 w:1) - /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: `Glutton::Storage` (r:0 w:1) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn set_storage() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_933_000 picoseconds. - Weight::from_parts(8_213_000, 0) + // Minimum execution time: 5_689_000 picoseconds. + Weight::from_parts(6_038_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/frame/grandpa/src/lib.rs b/substrate/frame/grandpa/src/lib.rs index 0b9f2b3582792e9d93695d34656912a8d70a131f..90bcd8721dfa1f6d091edc34da883c2172d9f6ca 100644 --- a/substrate/frame/grandpa/src/lib.rs +++ b/substrate/frame/grandpa/src/lib.rs @@ -73,7 +73,7 @@ pub mod pallet { use frame_support::{dispatch::DispatchResult, pallet_prelude::*}; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. 
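The doc tweak above ("current" storage version becoming "in-code" storage version) follows the FRAME terminology that distinguishes the version compiled into the pallet from the version recorded on chain. As a rough sketch, assuming the `GetStorageVersion` trait from `frame_support` with the renamed `in_code_storage_version` accessor that this wording refers to, a migration guard compares the two:

```rust
// Sketch (not part of this diff): decide whether a pallet, such as GRANDPA with
// its in-code `StorageVersion::new(5)` below, still needs a storage migration by
// comparing the on-chain version with the version compiled into the code.
use frame_support::traits::{GetStorageVersion, StorageVersion};

fn needs_migration<P>() -> bool
where
    P: GetStorageVersion<InCodeStorageVersion = StorageVersion>,
{
    P::on_chain_storage_version() < P::in_code_storage_version()
}
```

In a concrete runtime this would be invoked as, for example, `needs_migration::<pallet_grandpa::Pallet<Runtime>>()`.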
const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); #[pallet::pallet] diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs index 5d48f974c31476f74a876d3ed8a74ff4b2bdf8d3..e1be487dbb75cf2c7523e808ec8dc8ec015eadb1 100644 --- a/substrate/frame/grandpa/src/mock.rs +++ b/substrate/frame/grandpa/src/mock.rs @@ -35,11 +35,8 @@ use sp_consensus_grandpa::{RoundNumber, SetId, GRANDPA_ENGINE_ID}; use sp_core::{crypto::KeyTypeId, H256}; use sp_keyring::Ed25519Keyring; use sp_runtime::{ - curve::PiecewiseLinear, - impl_opaque_keys, - testing::{TestXt, UintAuthorityId}, - traits::OpaqueKeys, - BuildStorage, DigestItem, Perbill, + curve::PiecewiseLinear, generic::UncheckedExtrinsic, impl_opaque_keys, + testing::UintAuthorityId, traits::OpaqueKeys, BuildStorage, DigestItem, Perbill, }; use sp_staking::{EraIndex, SessionIndex}; @@ -77,7 +74,7 @@ where RuntimeCall: From, { type OverarchingCall = RuntimeCall; - type Extrinsic = TestXt; + type Extrinsic = UncheckedExtrinsic; } parameter_types! { diff --git a/substrate/frame/identity/src/weights.rs b/substrate/frame/identity/src/weights.rs index 1feb8252c845383ff8a31f2f992e6cf6b3c0001d..81de520d7f2bca8f298e136b7fb58d35ba3f28d8 100644 --- a/substrate/frame/identity/src/weights.rs +++ b/substrate/frame/identity/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_identity +//! Autogenerated weights for `pallet_identity` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/identity/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/identity/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_identity. +/// Weight functions needed for `pallet_identity`. pub trait WeightInfo { fn add_registrar(r: u32, ) -> Weight; fn set_identity(r: u32, ) -> Weight; @@ -77,278 +76,274 @@ pub trait WeightInfo { fn remove_dangling_username() -> Weight; } -/// Weights for pallet_identity using the Substrate node and recommended hardware. +/// Weights for `pallet_identity` using the Substrate node and recommended hardware. 
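Because only the constants change in this regeneration, code consuming the `WeightInfo` trait of `pallet_identity` is unaffected. A small sketch, assuming a crate that depends on `frame_support` and `pallet_identity` and using the unit-type implementation these weight files conventionally provide for tests (the helper function is illustrative, not part of the pallet):

```rust
// Sketch: combine benchmarked weights like any other `Weight` values. This sums
// the cost of one `add_registrar` call plus one `set_fee` call, both taken at
// registrar count `r`, for whichever `WeightInfo` implementation is chosen.
use frame_support::weights::Weight;
use pallet_identity::weights::WeightInfo;

fn registrar_setup_weight<W: WeightInfo>(r: u32) -> Weight {
    W::add_registrar(r).saturating_add(W::set_fee(r))
}

fn main() {
    // `()` is the RocksDbWeight-backed implementation intended for tests.
    let w = registrar_setup_weight::<()>(19);
    println!("ref_time: {} ps, proof_size: {} bytes", w.ref_time(), w.proof_size());
}
```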
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn add_registrar(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `32 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 11_683_000 picoseconds. - Weight::from_parts(12_515_830, 2626) - // Standard Error: 2_154 - .saturating_add(Weight::from_parts(147_919, 0).saturating_mul(r.into())) + // Minimum execution time: 8_857_000 picoseconds. + Weight::from_parts(9_331_464, 2626) + // Standard Error: 1_745 + .saturating_add(Weight::from_parts(94_096, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. - fn set_identity(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `442 + r * (5 ±0)` - // Estimated: `11003` - // Minimum execution time: 32_949_000 picoseconds. - Weight::from_parts(31_329_634, 11003) - // Standard Error: 4_496 - .saturating_add(Weight::from_parts(203_570, 0).saturating_mul(r.into())) + fn set_identity(_r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `6978 + r * (5 ±0)` + // Estimated: `11037` + // Minimum execution time: 96_522_000 picoseconds. + Weight::from_parts(102_738_605, 11037) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:100 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:100 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn set_subs_new(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `101` - // Estimated: `11003 + s * (2589 ±0)` - // Minimum execution time: 9_157_000 picoseconds. - Weight::from_parts(24_917_444, 11003) - // Standard Error: 4_554 - .saturating_add(Weight::from_parts(3_279_868, 0).saturating_mul(s.into())) + // Estimated: `11037 + s * (2589 ±0)` + // Minimum execution time: 9_112_000 picoseconds. 
+ Weight::from_parts(22_676_873, 11037) + // Standard Error: 6_494 + .saturating_add(Weight::from_parts(3_279_196, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(s.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 2589).saturating_mul(s.into())) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 100]`. fn set_subs_old(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `194 + p * (32 ±0)` - // Estimated: `11003` - // Minimum execution time: 9_240_000 picoseconds. - Weight::from_parts(23_326_035, 11003) - // Standard Error: 3_664 - .saturating_add(Weight::from_parts(1_439_873, 0).saturating_mul(p.into())) + // Estimated: `11037` + // Minimum execution time: 8_902_000 picoseconds. + Weight::from_parts(21_168_031, 11037) + // Standard Error: 3_576 + .saturating_add(Weight::from_parts(1_431_542, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) } - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. - fn clear_identity(r: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `469 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 55_687_000 picoseconds. 
- Weight::from_parts(30_695_182, 11003) - // Standard Error: 9_921 - .saturating_add(Weight::from_parts(162_357, 0).saturating_mul(r.into())) - // Standard Error: 1_937 - .saturating_add(Weight::from_parts(1_427_998, 0).saturating_mul(s.into())) + fn clear_identity(_r: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `7070 + r * (5 ±0) + s * (32 ±0)` + // Estimated: `11037` + // Minimum execution time: 52_468_000 picoseconds. + Weight::from_parts(56_065_452, 11037) + // Standard Error: 2_274 + .saturating_add(Weight::from_parts(1_399_051, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) } - /// Storage: Identity Registrars (r:1 w:0) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:0) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. fn request_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `367 + r * (57 ±0) + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 34_876_000 picoseconds. - Weight::from_parts(32_207_018, 11003) - // Standard Error: 5_247 - .saturating_add(Weight::from_parts(249_156, 0).saturating_mul(r.into())) + // Measured: `6968 + r * (57 ±0)` + // Estimated: `11037` + // Minimum execution time: 67_951_000 picoseconds. + Weight::from_parts(69_591_058, 11037) + // Standard Error: 4_420 + .saturating_add(Weight::from_parts(209_239, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. fn cancel_request(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `398 + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 30_689_000 picoseconds. - Weight::from_parts(31_967_170, 11003) - // Standard Error: 5_387 - .saturating_add(Weight::from_parts(42_676, 0).saturating_mul(r.into())) + // Measured: `6999` + // Estimated: `11037` + // Minimum execution time: 67_818_000 picoseconds. 
+ Weight::from_parts(70_356_319, 11037) + // Standard Error: 5_116 + .saturating_add(Weight::from_parts(4_264, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn set_fee(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 7_357_000 picoseconds. - Weight::from_parts(7_932_950, 2626) - // Standard Error: 1_804 - .saturating_add(Weight::from_parts(132_653, 0).saturating_mul(r.into())) + // Minimum execution time: 6_096_000 picoseconds. + Weight::from_parts(6_470_752, 2626) + // Standard Error: 1_328 + .saturating_add(Weight::from_parts(94_764, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn set_account_id(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 7_437_000 picoseconds. - Weight::from_parts(8_051_889, 2626) - // Standard Error: 1_997 - .saturating_add(Weight::from_parts(129_592, 0).saturating_mul(r.into())) + // Minimum execution time: 6_278_000 picoseconds. + Weight::from_parts(6_678_997, 2626) + // Standard Error: 1_396 + .saturating_add(Weight::from_parts(87_207, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn set_fields(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 7_385_000 picoseconds. - Weight::from_parts(7_911_589, 2626) - // Standard Error: 1_791 - .saturating_add(Weight::from_parts(125_788, 0).saturating_mul(r.into())) + // Minimum execution time: 6_130_000 picoseconds. 
+ Weight::from_parts(6_516_146, 2626) + // Standard Error: 1_356 + .saturating_add(Weight::from_parts(88_455, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Identity Registrars (r:1 w:0) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:0) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn provide_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `445 + r * (57 ±0) + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 24_073_000 picoseconds. - Weight::from_parts(17_817_684, 11003) - // Standard Error: 8_612 - .saturating_add(Weight::from_parts(406_251, 0).saturating_mul(r.into())) + // Measured: `7046 + r * (57 ±0)` + // Estimated: `11037` + // Minimum execution time: 87_100_000 picoseconds. + Weight::from_parts(87_601_945, 11037) + // Standard Error: 22_724 + .saturating_add(Weight::from_parts(458_296, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. fn kill_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `676 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 73_981_000 picoseconds. - Weight::from_parts(51_684_057, 11003) - // Standard Error: 12_662 - .saturating_add(Weight::from_parts(145_285, 0).saturating_mul(r.into())) - // Standard Error: 2_472 - .saturating_add(Weight::from_parts(1_421_039, 0).saturating_mul(s.into())) + // Measured: `7277 + r * (5 ±0) + s * (32 ±0)` + // Estimated: `11037` + // Minimum execution time: 65_925_000 picoseconds. 
+ Weight::from_parts(70_786_250, 11037) + // Standard Error: 19_680 + .saturating_add(Weight::from_parts(29_782, 0).saturating_mul(r.into())) + // Standard Error: 3_839 + .saturating_add(Weight::from_parts(1_377_604, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 99]`. fn add_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `475 + s * (36 ±0)` - // Estimated: `11003` - // Minimum execution time: 29_367_000 picoseconds. - Weight::from_parts(34_214_998, 11003) - // Standard Error: 1_522 - .saturating_add(Weight::from_parts(114_551, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 25_916_000 picoseconds. + Weight::from_parts(29_781_270, 11037) + // Standard Error: 1_326 + .saturating_add(Weight::from_parts(101_515, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn rename_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `591 + s * (3 ±0)` - // Estimated: `11003` - // Minimum execution time: 12_384_000 picoseconds. - Weight::from_parts(14_417_903, 11003) - // Standard Error: 539 - .saturating_add(Weight::from_parts(38_371, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 12_142_000 picoseconds. 
+ Weight::from_parts(14_179_741, 11037) + // Standard Error: 538 + .saturating_add(Weight::from_parts(38_847, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn remove_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `638 + s * (35 ±0)` - // Estimated: `11003` - // Minimum execution time: 33_327_000 picoseconds. - Weight::from_parts(36_208_941, 11003) - // Standard Error: 1_240 - .saturating_add(Weight::from_parts(105_805, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 29_327_000 picoseconds. + Weight::from_parts(32_163_402, 11037) + // Standard Error: 1_047 + .saturating_add(Weight::from_parts(87_159, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 99]`. fn quit_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `704 + s * (37 ±0)` // Estimated: `6723` - // Minimum execution time: 23_764_000 picoseconds. - Weight::from_parts(26_407_731, 6723) - // Standard Error: 1_025 - .saturating_add(Weight::from_parts(101_112, 0).saturating_mul(s.into())) + // Minimum execution time: 22_380_000 picoseconds. 
+ Weight::from_parts(24_557_574, 6723) + // Standard Error: 1_131 + .saturating_add(Weight::from_parts(86_358, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -358,367 +353,359 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 13_873_000 picoseconds. - Weight::from_parts(13_873_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) + // Minimum execution time: 6_791_000 picoseconds. + Weight::from_parts(7_126_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: `Identity::UsernameAuthorities` (r:0 w:1) + /// Storage: `Identity::UsernameAuthorities` (r:1 w:1) /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn remove_username_authority() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 10_653_000 picoseconds. - Weight::from_parts(10_653_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) + // Measured: `80` + // Estimated: `3517` + // Minimum execution time: 9_657_000 picoseconds. + Weight::from_parts(9_922_000, 3517) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Identity::UsernameAuthorities` (r:1 w:1) /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `Identity::AccountOfUsername` (r:1 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Identity::PendingUsernames` (r:1 w:0) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) fn set_username_for() -> Weight { // Proof Size summary in bytes: // Measured: `80` // Estimated: `11037` - // Minimum execution time: 75_928_000 picoseconds. - Weight::from_parts(75_928_000, 0) - .saturating_add(Weight::from_parts(0, 11037)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) + // Minimum execution time: 67_928_000 picoseconds. 
+ Weight::from_parts(69_993_000, 11037) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Identity::PendingUsernames` (r:1 w:1) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `Identity::AccountOfUsername` (r:0 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) fn accept_username() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `115` // Estimated: `11037` - // Minimum execution time: 38_157_000 picoseconds. - Weight::from_parts(38_157_000, 0) - .saturating_add(Weight::from_parts(0, 11037)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(3)) + // Minimum execution time: 20_496_000 picoseconds. + Weight::from_parts(20_971_000, 11037) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Identity::PendingUsernames` (r:1 w:1) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn remove_expired_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `3542` - // Minimum execution time: 46_821_000 picoseconds. - Weight::from_parts(46_821_000, 0) - .saturating_add(Weight::from_parts(0, 3542)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) + // Measured: `115` + // Estimated: `3550` + // Minimum execution time: 13_845_000 picoseconds. + Weight::from_parts(16_201_000, 3550) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Identity::AccountOfUsername` (r:1 w:0) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) fn set_primary_username() -> Weight { // Proof Size summary in bytes: - // Measured: `247` + // Measured: `257` // Estimated: `11037` - // Minimum execution time: 22_515_000 picoseconds. - Weight::from_parts(22_515_000, 0) - .saturating_add(Weight::from_parts(0, 11037)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) + // Minimum execution time: 15_900_000 picoseconds. 
+ Weight::from_parts(16_716_000, 11037) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Identity::AccountOfUsername` (r:1 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:0) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) fn remove_dangling_username() -> Weight { // Proof Size summary in bytes: - // Measured: `126` + // Measured: `98` // Estimated: `11037` - // Minimum execution time: 15_997_000 picoseconds. - Weight::from_parts(15_997_000, 0) - .saturating_add(Weight::from_parts(0, 11037)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) + // Minimum execution time: 11_538_000 picoseconds. + Weight::from_parts(11_898_000, 11037) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn add_registrar(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `32 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 11_683_000 picoseconds. - Weight::from_parts(12_515_830, 2626) - // Standard Error: 2_154 - .saturating_add(Weight::from_parts(147_919, 0).saturating_mul(r.into())) + // Minimum execution time: 8_857_000 picoseconds. + Weight::from_parts(9_331_464, 2626) + // Standard Error: 1_745 + .saturating_add(Weight::from_parts(94_096, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. - fn set_identity(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `442 + r * (5 ±0)` - // Estimated: `11003` - // Minimum execution time: 32_949_000 picoseconds. - Weight::from_parts(31_329_634, 11003) - // Standard Error: 4_496 - .saturating_add(Weight::from_parts(203_570, 0).saturating_mul(r.into())) + fn set_identity(_r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `6978 + r * (5 ±0)` + // Estimated: `11037` + // Minimum execution time: 96_522_000 picoseconds. 
+ Weight::from_parts(102_738_605, 11037) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:100 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:100 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn set_subs_new(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `101` - // Estimated: `11003 + s * (2589 ±0)` - // Minimum execution time: 9_157_000 picoseconds. - Weight::from_parts(24_917_444, 11003) - // Standard Error: 4_554 - .saturating_add(Weight::from_parts(3_279_868, 0).saturating_mul(s.into())) + // Estimated: `11037 + s * (2589 ±0)` + // Minimum execution time: 9_112_000 picoseconds. + Weight::from_parts(22_676_873, 11037) + // Standard Error: 6_494 + .saturating_add(Weight::from_parts(3_279_196, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(s.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 2589).saturating_mul(s.into())) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 100]`. fn set_subs_old(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `194 + p * (32 ±0)` - // Estimated: `11003` - // Minimum execution time: 9_240_000 picoseconds. - Weight::from_parts(23_326_035, 11003) - // Standard Error: 3_664 - .saturating_add(Weight::from_parts(1_439_873, 0).saturating_mul(p.into())) + // Estimated: `11037` + // Minimum execution time: 8_902_000 picoseconds. 
+ Weight::from_parts(21_168_031, 11037) + // Standard Error: 3_576 + .saturating_add(Weight::from_parts(1_431_542, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(p.into()))) } - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. - fn clear_identity(r: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `469 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 55_687_000 picoseconds. - Weight::from_parts(30_695_182, 11003) - // Standard Error: 9_921 - .saturating_add(Weight::from_parts(162_357, 0).saturating_mul(r.into())) - // Standard Error: 1_937 - .saturating_add(Weight::from_parts(1_427_998, 0).saturating_mul(s.into())) + fn clear_identity(_r: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `7070 + r * (5 ±0) + s * (32 ±0)` + // Estimated: `11037` + // Minimum execution time: 52_468_000 picoseconds. + Weight::from_parts(56_065_452, 11037) + // Standard Error: 2_274 + .saturating_add(Weight::from_parts(1_399_051, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) } - /// Storage: Identity Registrars (r:1 w:0) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:0) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. fn request_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `367 + r * (57 ±0) + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 34_876_000 picoseconds. - Weight::from_parts(32_207_018, 11003) - // Standard Error: 5_247 - .saturating_add(Weight::from_parts(249_156, 0).saturating_mul(r.into())) + // Measured: `6968 + r * (57 ±0)` + // Estimated: `11037` + // Minimum execution time: 67_951_000 picoseconds. 
+ Weight::from_parts(69_591_058, 11037) + // Standard Error: 4_420 + .saturating_add(Weight::from_parts(209_239, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. fn cancel_request(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `398 + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 30_689_000 picoseconds. - Weight::from_parts(31_967_170, 11003) - // Standard Error: 5_387 - .saturating_add(Weight::from_parts(42_676, 0).saturating_mul(r.into())) + // Measured: `6999` + // Estimated: `11037` + // Minimum execution time: 67_818_000 picoseconds. + Weight::from_parts(70_356_319, 11037) + // Standard Error: 5_116 + .saturating_add(Weight::from_parts(4_264, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn set_fee(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 7_357_000 picoseconds. - Weight::from_parts(7_932_950, 2626) - // Standard Error: 1_804 - .saturating_add(Weight::from_parts(132_653, 0).saturating_mul(r.into())) + // Minimum execution time: 6_096_000 picoseconds. + Weight::from_parts(6_470_752, 2626) + // Standard Error: 1_328 + .saturating_add(Weight::from_parts(94_764, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn set_account_id(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 7_437_000 picoseconds. - Weight::from_parts(8_051_889, 2626) - // Standard Error: 1_997 - .saturating_add(Weight::from_parts(129_592, 0).saturating_mul(r.into())) + // Minimum execution time: 6_278_000 picoseconds. 
+ Weight::from_parts(6_678_997, 2626) + // Standard Error: 1_396 + .saturating_add(Weight::from_parts(87_207, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn set_fields(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 7_385_000 picoseconds. - Weight::from_parts(7_911_589, 2626) - // Standard Error: 1_791 - .saturating_add(Weight::from_parts(125_788, 0).saturating_mul(r.into())) + // Minimum execution time: 6_130_000 picoseconds. + Weight::from_parts(6_516_146, 2626) + // Standard Error: 1_356 + .saturating_add(Weight::from_parts(88_455, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity Registrars (r:1 w:0) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:0) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn provide_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `445 + r * (57 ±0) + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 24_073_000 picoseconds. - Weight::from_parts(17_817_684, 11003) - // Standard Error: 8_612 - .saturating_add(Weight::from_parts(406_251, 0).saturating_mul(r.into())) + // Measured: `7046 + r * (57 ±0)` + // Estimated: `11037` + // Minimum execution time: 87_100_000 picoseconds. 
+ Weight::from_parts(87_601_945, 11037) + // Standard Error: 22_724 + .saturating_add(Weight::from_parts(458_296, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. fn kill_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `676 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 73_981_000 picoseconds. - Weight::from_parts(51_684_057, 11003) - // Standard Error: 12_662 - .saturating_add(Weight::from_parts(145_285, 0).saturating_mul(r.into())) - // Standard Error: 2_472 - .saturating_add(Weight::from_parts(1_421_039, 0).saturating_mul(s.into())) + // Measured: `7277 + r * (5 ±0) + s * (32 ±0)` + // Estimated: `11037` + // Minimum execution time: 65_925_000 picoseconds. 
+ Weight::from_parts(70_786_250, 11037) + // Standard Error: 19_680 + .saturating_add(Weight::from_parts(29_782, 0).saturating_mul(r.into())) + // Standard Error: 3_839 + .saturating_add(Weight::from_parts(1_377_604, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 99]`. fn add_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `475 + s * (36 ±0)` - // Estimated: `11003` - // Minimum execution time: 29_367_000 picoseconds. - Weight::from_parts(34_214_998, 11003) - // Standard Error: 1_522 - .saturating_add(Weight::from_parts(114_551, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 25_916_000 picoseconds. + Weight::from_parts(29_781_270, 11037) + // Standard Error: 1_326 + .saturating_add(Weight::from_parts(101_515, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn rename_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `591 + s * (3 ±0)` - // Estimated: `11003` - // Minimum execution time: 12_384_000 picoseconds. - Weight::from_parts(14_417_903, 11003) - // Standard Error: 539 - .saturating_add(Weight::from_parts(38_371, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 12_142_000 picoseconds. 
+ Weight::from_parts(14_179_741, 11037) + // Standard Error: 538 + .saturating_add(Weight::from_parts(38_847, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn remove_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `638 + s * (35 ±0)` - // Estimated: `11003` - // Minimum execution time: 33_327_000 picoseconds. - Weight::from_parts(36_208_941, 11003) - // Standard Error: 1_240 - .saturating_add(Weight::from_parts(105_805, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 29_327_000 picoseconds. + Weight::from_parts(32_163_402, 11037) + // Standard Error: 1_047 + .saturating_add(Weight::from_parts(87_159, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 99]`. fn quit_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `704 + s * (37 ±0)` // Estimated: `6723` - // Minimum execution time: 23_764_000 picoseconds. - Weight::from_parts(26_407_731, 6723) - // Standard Error: 1_025 - .saturating_add(Weight::from_parts(101_112, 0).saturating_mul(s.into())) + // Minimum execution time: 22_380_000 picoseconds. 
+ Weight::from_parts(24_557_574, 6723) + // Standard Error: 1_131 + .saturating_add(Weight::from_parts(86_358, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -728,92 +715,88 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 13_873_000 picoseconds. - Weight::from_parts(13_873_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(RocksDbWeight::get().writes(1)) + // Minimum execution time: 6_791_000 picoseconds. + Weight::from_parts(7_126_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Identity::UsernameAuthorities` (r:0 w:1) + /// Storage: `Identity::UsernameAuthorities` (r:1 w:1) /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn remove_username_authority() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 10_653_000 picoseconds. - Weight::from_parts(10_653_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(RocksDbWeight::get().writes(1)) + // Measured: `80` + // Estimated: `3517` + // Minimum execution time: 9_657_000 picoseconds. + Weight::from_parts(9_922_000, 3517) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Identity::UsernameAuthorities` (r:1 w:1) /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `Identity::AccountOfUsername` (r:1 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Identity::PendingUsernames` (r:1 w:0) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) fn set_username_for() -> Weight { // Proof Size summary in bytes: // Measured: `80` // Estimated: `11037` - // Minimum execution time: 75_928_000 picoseconds. - Weight::from_parts(75_928_000, 0) - .saturating_add(Weight::from_parts(0, 11037)) - .saturating_add(RocksDbWeight::get().reads(3)) - .saturating_add(RocksDbWeight::get().writes(3)) + // Minimum execution time: 67_928_000 picoseconds. 
+ Weight::from_parts(69_993_000, 11037) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Identity::PendingUsernames` (r:1 w:1) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `Identity::AccountOfUsername` (r:0 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) fn accept_username() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `115` // Estimated: `11037` - // Minimum execution time: 38_157_000 picoseconds. - Weight::from_parts(38_157_000, 0) - .saturating_add(Weight::from_parts(0, 11037)) - .saturating_add(RocksDbWeight::get().reads(2)) - .saturating_add(RocksDbWeight::get().writes(3)) + // Minimum execution time: 20_496_000 picoseconds. + Weight::from_parts(20_971_000, 11037) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Identity::PendingUsernames` (r:1 w:1) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn remove_expired_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `3542` - // Minimum execution time: 46_821_000 picoseconds. - Weight::from_parts(46_821_000, 0) - .saturating_add(Weight::from_parts(0, 3542)) - .saturating_add(RocksDbWeight::get().reads(1)) - .saturating_add(RocksDbWeight::get().writes(1)) + // Measured: `115` + // Estimated: `3550` + // Minimum execution time: 13_845_000 picoseconds. + Weight::from_parts(16_201_000, 3550) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Identity::AccountOfUsername` (r:1 w:0) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) fn set_primary_username() -> Weight { // Proof Size summary in bytes: - // Measured: `247` + // Measured: `257` // Estimated: `11037` - // Minimum execution time: 22_515_000 picoseconds. - Weight::from_parts(22_515_000, 0) - .saturating_add(Weight::from_parts(0, 11037)) - .saturating_add(RocksDbWeight::get().reads(2)) - .saturating_add(RocksDbWeight::get().writes(1)) + // Minimum execution time: 15_900_000 picoseconds. 
+ Weight::from_parts(16_716_000, 11037) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Identity::AccountOfUsername` (r:1 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:0) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) fn remove_dangling_username() -> Weight { // Proof Size summary in bytes: - // Measured: `126` + // Measured: `98` // Estimated: `11037` - // Minimum execution time: 15_997_000 picoseconds. - Weight::from_parts(15_997_000, 0) - .saturating_add(Weight::from_parts(0, 11037)) - .saturating_add(RocksDbWeight::get().reads(2)) - .saturating_add(RocksDbWeight::get().writes(1)) + // Minimum execution time: 11_538_000 picoseconds. + Weight::from_parts(11_898_000, 11037) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/frame/im-online/src/lib.rs b/substrate/frame/im-online/src/lib.rs index 1de89dd00c8121c64a19f8b45f24fc6500126dce..f14093aa09afa98da1c37b0e961319e80821fe40 100644 --- a/substrate/frame/im-online/src/lib.rs +++ b/substrate/frame/im-online/src/lib.rs @@ -251,7 +251,7 @@ type OffchainResult = Result>>; pub mod pallet { use super::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/substrate/frame/im-online/src/migration.rs b/substrate/frame/im-online/src/migration.rs index 0d2c0a055b6d88855bc8783b65e06f0427197208..754a2e672e6cfaec32ed0e44c9af54469fddbb04 100644 --- a/substrate/frame/im-online/src/migration.rs +++ b/substrate/frame/im-online/src/migration.rs @@ -72,7 +72,7 @@ pub mod v1 { if StorageVersion::get::>() != 0 { log::warn!( target: TARGET, - "Skipping migration because current storage version is not 0" + "Skipping migration because in-code storage version is not 0" ); return weight } diff --git a/substrate/frame/im-online/src/mock.rs b/substrate/frame/im-online/src/mock.rs index 9dad148b10fa1b0942e93f1c5d183264ab282f05..c0ba9143e1385ebc0b57445c864a270d28936b8d 100644 --- a/substrate/frame/im-online/src/mock.rs +++ b/substrate/frame/im-online/src/mock.rs @@ -27,7 +27,7 @@ use frame_support::{ use pallet_session::historical as pallet_session_historical; use sp_core::H256; use sp_runtime::{ - testing::{TestXt, UintAuthorityId}, + testing::UintAuthorityId, traits::{BlakeTwo256, ConvertInto, IdentityLookup}, BuildStorage, Permill, }; @@ -78,7 +78,7 @@ impl pallet_session::historical::SessionManager for TestSessionManager } /// An extrinsic type used for tests. -pub type Extrinsic = TestXt; +pub type Extrinsic = sp_runtime::generic::UncheckedExtrinsic; type IdentificationTuple = (u64, u64); type Offence = crate::UnresponsivenessOffence; diff --git a/substrate/frame/im-online/src/tests.rs b/substrate/frame/im-online/src/tests.rs index 79036760c2d427c2895163b150f3806bf17222f2..7efca926eb0dc4f0098af01967b5214ab371e143 100644 --- a/substrate/frame/im-online/src/tests.rs +++ b/substrate/frame/im-online/src/tests.rs @@ -231,7 +231,7 @@ fn should_generate_heartbeats() { // check stuff about the transaction. 
let ex: Extrinsic = Decode::decode(&mut &*transaction).unwrap(); - let heartbeat = match ex.call { + let heartbeat = match ex.function { crate::mock::RuntimeCall::ImOnline(crate::Call::heartbeat { heartbeat, .. }) => heartbeat, e => panic!("Unexpected call: {:?}", e), @@ -345,7 +345,7 @@ fn should_not_send_a_report_if_already_online() { assert_eq!(pool_state.read().transactions.len(), 0); // check stuff about the transaction. let ex: Extrinsic = Decode::decode(&mut &*transaction).unwrap(); - let heartbeat = match ex.call { + let heartbeat = match ex.function { crate::mock::RuntimeCall::ImOnline(crate::Call::heartbeat { heartbeat, .. }) => heartbeat, e => panic!("Unexpected call: {:?}", e), diff --git a/substrate/frame/im-online/src/weights.rs b/substrate/frame/im-online/src/weights.rs index c3db02af2578209d9ab97a777ef4a7447d45b738..11357b1e7b7d38983966ff6d03e91ddec0bd10cf 100644 --- a/substrate/frame/im-online/src/weights.rs +++ b/substrate/frame/im-online/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_im_online +//! Autogenerated weights for `pallet_im_online` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/im-online/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/im-online/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,60 +49,60 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_im_online. +/// Weight functions needed for `pallet_im_online`. pub trait WeightInfo { fn validate_unsigned_and_then_heartbeat(k: u32, ) -> Weight; } -/// Weights for pallet_im_online using the Substrate node and recommended hardware. +/// Weights for `pallet_im_online` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Session Validators (r:1 w:0) - /// Proof Skipped: Session Validators (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Session CurrentIndex (r:1 w:0) - /// Proof Skipped: Session CurrentIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ImOnline Keys (r:1 w:0) - /// Proof: ImOnline Keys (max_values: Some(1), max_size: Some(320002), added: 320497, mode: MaxEncodedLen) - /// Storage: ImOnline ReceivedHeartbeats (r:1 w:1) - /// Proof: ImOnline ReceivedHeartbeats (max_values: None, max_size: Some(25), added: 2500, mode: MaxEncodedLen) - /// Storage: ImOnline AuthoredBlocks (r:1 w:0) - /// Proof: ImOnline AuthoredBlocks (max_values: None, max_size: Some(56), added: 2531, mode: MaxEncodedLen) + /// Storage: `Session::Validators` (r:1 w:0) + /// Proof: `Session::Validators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::CurrentIndex` (r:1 w:0) + /// Proof: `Session::CurrentIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ImOnline::Keys` (r:1 w:0) + /// Proof: `ImOnline::Keys` (`max_values`: Some(1), `max_size`: Some(320002), added: 320497, mode: `MaxEncodedLen`) + /// Storage: `ImOnline::ReceivedHeartbeats` (r:1 w:1) + /// Proof: `ImOnline::ReceivedHeartbeats` (`max_values`: None, `max_size`: Some(25), added: 2500, mode: `MaxEncodedLen`) + /// Storage: `ImOnline::AuthoredBlocks` (r:1 w:0) + /// Proof: `ImOnline::AuthoredBlocks` (`max_values`: None, `max_size`: Some(56), added: 2531, mode: `MaxEncodedLen`) /// The range of component `k` is `[1, 1000]`. fn validate_unsigned_and_then_heartbeat(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `295 + k * (32 ±0)` + // Measured: `327 + k * (32 ±0)` // Estimated: `321487 + k * (1761 ±0)` - // Minimum execution time: 80_568_000 picoseconds. - Weight::from_parts(95_175_595, 321487) - // Standard Error: 627 - .saturating_add(Weight::from_parts(39_094, 0).saturating_mul(k.into())) + // Minimum execution time: 65_515_000 picoseconds. + Weight::from_parts(74_765_329, 321487) + // Standard Error: 500 + .saturating_add(Weight::from_parts(39_171, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1761).saturating_mul(k.into())) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: Session Validators (r:1 w:0) - /// Proof Skipped: Session Validators (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Session CurrentIndex (r:1 w:0) - /// Proof Skipped: Session CurrentIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ImOnline Keys (r:1 w:0) - /// Proof: ImOnline Keys (max_values: Some(1), max_size: Some(320002), added: 320497, mode: MaxEncodedLen) - /// Storage: ImOnline ReceivedHeartbeats (r:1 w:1) - /// Proof: ImOnline ReceivedHeartbeats (max_values: None, max_size: Some(25), added: 2500, mode: MaxEncodedLen) - /// Storage: ImOnline AuthoredBlocks (r:1 w:0) - /// Proof: ImOnline AuthoredBlocks (max_values: None, max_size: Some(56), added: 2531, mode: MaxEncodedLen) + /// Storage: `Session::Validators` (r:1 w:0) + /// Proof: `Session::Validators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::CurrentIndex` (r:1 w:0) + /// Proof: `Session::CurrentIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ImOnline::Keys` (r:1 w:0) + /// Proof: `ImOnline::Keys` (`max_values`: Some(1), `max_size`: Some(320002), added: 320497, mode: `MaxEncodedLen`) + /// Storage: `ImOnline::ReceivedHeartbeats` (r:1 w:1) + /// Proof: `ImOnline::ReceivedHeartbeats` (`max_values`: None, `max_size`: Some(25), added: 2500, mode: `MaxEncodedLen`) + /// Storage: `ImOnline::AuthoredBlocks` (r:1 w:0) + /// Proof: `ImOnline::AuthoredBlocks` (`max_values`: None, `max_size`: Some(56), added: 2531, mode: `MaxEncodedLen`) /// The range of component `k` is `[1, 1000]`. fn validate_unsigned_and_then_heartbeat(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `295 + k * (32 ±0)` + // Measured: `327 + k * (32 ±0)` // Estimated: `321487 + k * (1761 ±0)` - // Minimum execution time: 80_568_000 picoseconds. - Weight::from_parts(95_175_595, 321487) - // Standard Error: 627 - .saturating_add(Weight::from_parts(39_094, 0).saturating_mul(k.into())) + // Minimum execution time: 65_515_000 picoseconds. + Weight::from_parts(74_765_329, 321487) + // Standard Error: 500 + .saturating_add(Weight::from_parts(39_171, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1761).saturating_mul(k.into())) diff --git a/substrate/frame/indices/src/weights.rs b/substrate/frame/indices/src/weights.rs index d081cc738b1dbe6c1991b30ccb454307e4ca19ab..6b571164eb69e3ac25732a19067ee445ead52514 100644 --- a/substrate/frame/indices/src/weights.rs +++ b/substrate/frame/indices/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_indices +//! Autogenerated weights for `pallet_indices` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/indices/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/indices/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_indices. +/// Weight functions needed for `pallet_indices`. pub trait WeightInfo { fn claim() -> Weight; fn transfer() -> Weight; @@ -59,128 +58,128 @@ pub trait WeightInfo { fn freeze() -> Weight; } -/// Weights for pallet_indices using the Substrate node and recommended hardware. +/// Weights for `pallet_indices` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn claim() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3534` - // Minimum execution time: 25_491_000 picoseconds. - Weight::from_parts(26_456_000, 3534) + // Minimum execution time: 20_825_000 picoseconds. + Weight::from_parts(21_507_000, 3534) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: // Measured: `275` // Estimated: `3593` - // Minimum execution time: 38_027_000 picoseconds. - Weight::from_parts(38_749_000, 3593) + // Minimum execution time: 31_091_000 picoseconds. + Weight::from_parts(31_923_000, 3593) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn free() -> Weight { // Proof Size summary in bytes: // Measured: `172` // Estimated: `3534` - // Minimum execution time: 26_652_000 picoseconds. - Weight::from_parts(27_273_000, 3534) + // Minimum execution time: 21_832_000 picoseconds. 
+ Weight::from_parts(22_436_000, 3534) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `275` // Estimated: `3593` - // Minimum execution time: 29_464_000 picoseconds. - Weight::from_parts(30_959_000, 3593) + // Minimum execution time: 23_876_000 picoseconds. + Weight::from_parts(24_954_000, 3593) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn freeze() -> Weight { // Proof Size summary in bytes: // Measured: `172` // Estimated: `3534` - // Minimum execution time: 29_015_000 picoseconds. - Weight::from_parts(29_714_000, 3534) + // Minimum execution time: 22_954_000 picoseconds. + Weight::from_parts(23_792_000, 3534) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn claim() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3534` - // Minimum execution time: 25_491_000 picoseconds. - Weight::from_parts(26_456_000, 3534) + // Minimum execution time: 20_825_000 picoseconds. + Weight::from_parts(21_507_000, 3534) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: // Measured: `275` // Estimated: `3593` - // Minimum execution time: 38_027_000 picoseconds. - Weight::from_parts(38_749_000, 3593) + // Minimum execution time: 31_091_000 picoseconds. 
+ Weight::from_parts(31_923_000, 3593) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn free() -> Weight { // Proof Size summary in bytes: // Measured: `172` // Estimated: `3534` - // Minimum execution time: 26_652_000 picoseconds. - Weight::from_parts(27_273_000, 3534) + // Minimum execution time: 21_832_000 picoseconds. + Weight::from_parts(22_436_000, 3534) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `275` // Estimated: `3593` - // Minimum execution time: 29_464_000 picoseconds. - Weight::from_parts(30_959_000, 3593) + // Minimum execution time: 23_876_000 picoseconds. + Weight::from_parts(24_954_000, 3593) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn freeze() -> Weight { // Proof Size summary in bytes: // Measured: `172` // Estimated: `3534` - // Minimum execution time: 29_015_000 picoseconds. - Weight::from_parts(29_714_000, 3534) + // Minimum execution time: 22_954_000 picoseconds. + Weight::from_parts(23_792_000, 3534) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/lottery/src/weights.rs b/substrate/frame/lottery/src/weights.rs index 3b4e562375345befa93f431145a2aff776ec7517..7e56cdf90db5c7844dc8183b0a14381a910fdd24 100644 --- a/substrate/frame/lottery/src/weights.rs +++ b/substrate/frame/lottery/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_lottery +//! Autogenerated weights for `pallet_lottery` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/lottery/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/lottery/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_lottery. +/// Weight functions needed for `pallet_lottery`. pub trait WeightInfo { fn buy_ticket() -> Weight; fn set_calls(n: u32, ) -> Weight; @@ -60,214 +59,222 @@ pub trait WeightInfo { fn on_initialize_repeat() -> Weight; } -/// Weights for pallet_lottery using the Substrate node and recommended hardware. +/// Weights for `pallet_lottery` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Lottery Lottery (r:1 w:0) - /// Proof: Lottery Lottery (max_values: Some(1), max_size: Some(29), added: 524, mode: MaxEncodedLen) - /// Storage: Lottery CallIndices (r:1 w:0) - /// Proof: Lottery CallIndices (max_values: Some(1), max_size: Some(21), added: 516, mode: MaxEncodedLen) - /// Storage: Lottery TicketsCount (r:1 w:1) - /// Proof: Lottery TicketsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Lottery Participants (r:1 w:1) - /// Proof: Lottery Participants (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Lottery LotteryIndex (r:1 w:0) - /// Proof: Lottery LotteryIndex (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Lottery Tickets (r:0 w:1) - /// Proof: Lottery Tickets (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Lottery` (r:1 w:0) + /// Proof: `Lottery::Lottery` (`max_values`: Some(1), `max_size`: Some(29), added: 524, mode: `MaxEncodedLen`) + /// Storage: `Lottery::CallIndices` (r:1 w:0) + /// Proof: `Lottery::CallIndices` (`max_values`: Some(1), `max_size`: Some(21), added: 516, mode: `MaxEncodedLen`) + /// Storage: `Lottery::TicketsCount` (r:1 w:1) + /// Proof: `Lottery::TicketsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Participants` (r:1 w:1) + /// Proof: `Lottery::Participants` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + 
/// Storage: `Lottery::LotteryIndex` (r:1 w:0) + /// Proof: `Lottery::LotteryIndex` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Tickets` (r:0 w:1) + /// Proof: `Lottery::Tickets` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn buy_ticket() -> Weight { // Proof Size summary in bytes: - // Measured: `452` - // Estimated: `3593` - // Minimum execution time: 60_298_000 picoseconds. - Weight::from_parts(62_058_000, 3593) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `492` + // Estimated: `3997` + // Minimum execution time: 57_891_000 picoseconds. + Weight::from_parts(59_508_000, 3997) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Lottery CallIndices (r:0 w:1) - /// Proof: Lottery CallIndices (max_values: Some(1), max_size: Some(21), added: 516, mode: MaxEncodedLen) + /// Storage: `Lottery::CallIndices` (r:0 w:1) + /// Proof: `Lottery::CallIndices` (`max_values`: Some(1), `max_size`: Some(21), added: 516, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 10]`. fn set_calls(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_291_000 picoseconds. - Weight::from_parts(8_178_186, 0) - // Standard Error: 3_048 - .saturating_add(Weight::from_parts(330_871, 0).saturating_mul(n.into())) + // Minimum execution time: 5_026_000 picoseconds. + Weight::from_parts(5_854_666, 0) + // Standard Error: 3_233 + .saturating_add(Weight::from_parts(334_818, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Lottery Lottery (r:1 w:1) - /// Proof: Lottery Lottery (max_values: Some(1), max_size: Some(29), added: 524, mode: MaxEncodedLen) - /// Storage: Lottery LotteryIndex (r:1 w:1) - /// Proof: Lottery LotteryIndex (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Lottery::Lottery` (r:1 w:1) + /// Proof: `Lottery::Lottery` (`max_values`: Some(1), `max_size`: Some(29), added: 524, mode: `MaxEncodedLen`) + /// Storage: `Lottery::LotteryIndex` (r:1 w:1) + /// Proof: `Lottery::LotteryIndex` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn start_lottery() -> Weight { // Proof Size summary in bytes: - // Measured: `161` + // Measured: `194` // Estimated: `3593` - // Minimum execution time: 36_741_000 picoseconds. - Weight::from_parts(38_288_000, 3593) + // Minimum execution time: 26_216_000 picoseconds. 
+ Weight::from_parts(27_216_000, 3593) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Lottery Lottery (r:1 w:1) - /// Proof: Lottery Lottery (max_values: Some(1), max_size: Some(29), added: 524, mode: MaxEncodedLen) + /// Storage: `Lottery::Lottery` (r:1 w:1) + /// Proof: `Lottery::Lottery` (`max_values`: Some(1), `max_size`: Some(29), added: 524, mode: `MaxEncodedLen`) fn stop_repeat() -> Weight { // Proof Size summary in bytes: - // Measured: `219` + // Measured: `252` // Estimated: `1514` - // Minimum execution time: 7_270_000 picoseconds. - Weight::from_parts(7_578_000, 1514) + // Minimum execution time: 6_208_000 picoseconds. + Weight::from_parts(6_427_000, 1514) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) - /// Proof: RandomnessCollectiveFlip RandomMaterial (max_values: Some(1), max_size: Some(2594), added: 3089, mode: MaxEncodedLen) - /// Storage: Lottery Lottery (r:1 w:1) - /// Proof: Lottery Lottery (max_values: Some(1), max_size: Some(29), added: 524, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Lottery TicketsCount (r:1 w:1) - /// Proof: Lottery TicketsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Lottery Tickets (r:1 w:0) - /// Proof: Lottery Tickets (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) + /// Storage: `RandomnessCollectiveFlip::RandomMaterial` (r:1 w:0) + /// Proof: `RandomnessCollectiveFlip::RandomMaterial` (`max_values`: Some(1), `max_size`: Some(2594), added: 3089, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Lottery` (r:1 w:1) + /// Proof: `Lottery::Lottery` (`max_values`: Some(1), `max_size`: Some(29), added: 524, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Lottery::TicketsCount` (r:1 w:1) + /// Proof: `Lottery::TicketsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Tickets` (r:1 w:0) + /// Proof: `Lottery::Tickets` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn on_initialize_end() -> Weight { // Proof Size summary in bytes: - // Measured: `558` + // Measured: `591` // Estimated: `6196` - // Minimum execution time: 76_611_000 picoseconds. - Weight::from_parts(78_107_000, 6196) + // Minimum execution time: 58_660_000 picoseconds. 
+ Weight::from_parts(59_358_000, 6196) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) - /// Proof: RandomnessCollectiveFlip RandomMaterial (max_values: Some(1), max_size: Some(2594), added: 3089, mode: MaxEncodedLen) - /// Storage: Lottery Lottery (r:1 w:1) - /// Proof: Lottery Lottery (max_values: Some(1), max_size: Some(29), added: 524, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Lottery TicketsCount (r:1 w:1) - /// Proof: Lottery TicketsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Lottery Tickets (r:1 w:0) - /// Proof: Lottery Tickets (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) - /// Storage: Lottery LotteryIndex (r:1 w:1) - /// Proof: Lottery LotteryIndex (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `RandomnessCollectiveFlip::RandomMaterial` (r:1 w:0) + /// Proof: `RandomnessCollectiveFlip::RandomMaterial` (`max_values`: Some(1), `max_size`: Some(2594), added: 3089, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Lottery` (r:1 w:1) + /// Proof: `Lottery::Lottery` (`max_values`: Some(1), `max_size`: Some(29), added: 524, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Lottery::TicketsCount` (r:1 w:1) + /// Proof: `Lottery::TicketsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Tickets` (r:1 w:0) + /// Proof: `Lottery::Tickets` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// Storage: `Lottery::LotteryIndex` (r:1 w:1) + /// Proof: `Lottery::LotteryIndex` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn on_initialize_repeat() -> Weight { // Proof Size summary in bytes: - // Measured: `558` + // Measured: `591` // Estimated: `6196` - // Minimum execution time: 78_731_000 picoseconds. - Weight::from_parts(80_248_000, 6196) + // Minimum execution time: 59_376_000 picoseconds. + Weight::from_parts(60_598_000, 6196) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: Lottery Lottery (r:1 w:0) - /// Proof: Lottery Lottery (max_values: Some(1), max_size: Some(29), added: 524, mode: MaxEncodedLen) - /// Storage: Lottery CallIndices (r:1 w:0) - /// Proof: Lottery CallIndices (max_values: Some(1), max_size: Some(21), added: 516, mode: MaxEncodedLen) - /// Storage: Lottery TicketsCount (r:1 w:1) - /// Proof: Lottery TicketsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Lottery Participants (r:1 w:1) - /// Proof: Lottery Participants (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Lottery LotteryIndex (r:1 w:0) - /// Proof: Lottery LotteryIndex (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Lottery Tickets (r:0 w:1) - /// Proof: Lottery Tickets (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Lottery` (r:1 w:0) + /// Proof: `Lottery::Lottery` (`max_values`: Some(1), `max_size`: Some(29), added: 524, mode: `MaxEncodedLen`) + /// Storage: `Lottery::CallIndices` (r:1 w:0) + /// Proof: `Lottery::CallIndices` (`max_values`: Some(1), `max_size`: Some(21), added: 516, mode: `MaxEncodedLen`) + /// Storage: `Lottery::TicketsCount` (r:1 w:1) + /// Proof: `Lottery::TicketsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Participants` (r:1 w:1) + /// Proof: `Lottery::Participants` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Lottery::LotteryIndex` (r:1 w:0) + /// Proof: `Lottery::LotteryIndex` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Tickets` (r:0 w:1) + /// Proof: `Lottery::Tickets` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn buy_ticket() -> Weight { // Proof Size summary in bytes: - // Measured: `452` - // Estimated: `3593` - // Minimum execution time: 60_298_000 picoseconds. - Weight::from_parts(62_058_000, 3593) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `492` + // Estimated: `3997` + // Minimum execution time: 57_891_000 picoseconds. + Weight::from_parts(59_508_000, 3997) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Lottery CallIndices (r:0 w:1) - /// Proof: Lottery CallIndices (max_values: Some(1), max_size: Some(21), added: 516, mode: MaxEncodedLen) + /// Storage: `Lottery::CallIndices` (r:0 w:1) + /// Proof: `Lottery::CallIndices` (`max_values`: Some(1), `max_size`: Some(21), added: 516, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 10]`. fn set_calls(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_291_000 picoseconds. 
- Weight::from_parts(8_178_186, 0) - // Standard Error: 3_048 - .saturating_add(Weight::from_parts(330_871, 0).saturating_mul(n.into())) + // Minimum execution time: 5_026_000 picoseconds. + Weight::from_parts(5_854_666, 0) + // Standard Error: 3_233 + .saturating_add(Weight::from_parts(334_818, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Lottery Lottery (r:1 w:1) - /// Proof: Lottery Lottery (max_values: Some(1), max_size: Some(29), added: 524, mode: MaxEncodedLen) - /// Storage: Lottery LotteryIndex (r:1 w:1) - /// Proof: Lottery LotteryIndex (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Lottery::Lottery` (r:1 w:1) + /// Proof: `Lottery::Lottery` (`max_values`: Some(1), `max_size`: Some(29), added: 524, mode: `MaxEncodedLen`) + /// Storage: `Lottery::LotteryIndex` (r:1 w:1) + /// Proof: `Lottery::LotteryIndex` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn start_lottery() -> Weight { // Proof Size summary in bytes: - // Measured: `161` + // Measured: `194` // Estimated: `3593` - // Minimum execution time: 36_741_000 picoseconds. - Weight::from_parts(38_288_000, 3593) + // Minimum execution time: 26_216_000 picoseconds. + Weight::from_parts(27_216_000, 3593) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Lottery Lottery (r:1 w:1) - /// Proof: Lottery Lottery (max_values: Some(1), max_size: Some(29), added: 524, mode: MaxEncodedLen) + /// Storage: `Lottery::Lottery` (r:1 w:1) + /// Proof: `Lottery::Lottery` (`max_values`: Some(1), `max_size`: Some(29), added: 524, mode: `MaxEncodedLen`) fn stop_repeat() -> Weight { // Proof Size summary in bytes: - // Measured: `219` + // Measured: `252` // Estimated: `1514` - // Minimum execution time: 7_270_000 picoseconds. - Weight::from_parts(7_578_000, 1514) + // Minimum execution time: 6_208_000 picoseconds. 
+ Weight::from_parts(6_427_000, 1514) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) - /// Proof: RandomnessCollectiveFlip RandomMaterial (max_values: Some(1), max_size: Some(2594), added: 3089, mode: MaxEncodedLen) - /// Storage: Lottery Lottery (r:1 w:1) - /// Proof: Lottery Lottery (max_values: Some(1), max_size: Some(29), added: 524, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Lottery TicketsCount (r:1 w:1) - /// Proof: Lottery TicketsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Lottery Tickets (r:1 w:0) - /// Proof: Lottery Tickets (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) + /// Storage: `RandomnessCollectiveFlip::RandomMaterial` (r:1 w:0) + /// Proof: `RandomnessCollectiveFlip::RandomMaterial` (`max_values`: Some(1), `max_size`: Some(2594), added: 3089, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Lottery` (r:1 w:1) + /// Proof: `Lottery::Lottery` (`max_values`: Some(1), `max_size`: Some(29), added: 524, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Lottery::TicketsCount` (r:1 w:1) + /// Proof: `Lottery::TicketsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Tickets` (r:1 w:0) + /// Proof: `Lottery::Tickets` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn on_initialize_end() -> Weight { // Proof Size summary in bytes: - // Measured: `558` + // Measured: `591` // Estimated: `6196` - // Minimum execution time: 76_611_000 picoseconds. - Weight::from_parts(78_107_000, 6196) + // Minimum execution time: 58_660_000 picoseconds. 
+ Weight::from_parts(59_358_000, 6196) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) - /// Proof: RandomnessCollectiveFlip RandomMaterial (max_values: Some(1), max_size: Some(2594), added: 3089, mode: MaxEncodedLen) - /// Storage: Lottery Lottery (r:1 w:1) - /// Proof: Lottery Lottery (max_values: Some(1), max_size: Some(29), added: 524, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Lottery TicketsCount (r:1 w:1) - /// Proof: Lottery TicketsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Lottery Tickets (r:1 w:0) - /// Proof: Lottery Tickets (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) - /// Storage: Lottery LotteryIndex (r:1 w:1) - /// Proof: Lottery LotteryIndex (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `RandomnessCollectiveFlip::RandomMaterial` (r:1 w:0) + /// Proof: `RandomnessCollectiveFlip::RandomMaterial` (`max_values`: Some(1), `max_size`: Some(2594), added: 3089, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Lottery` (r:1 w:1) + /// Proof: `Lottery::Lottery` (`max_values`: Some(1), `max_size`: Some(29), added: 524, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Lottery::TicketsCount` (r:1 w:1) + /// Proof: `Lottery::TicketsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Tickets` (r:1 w:0) + /// Proof: `Lottery::Tickets` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// Storage: `Lottery::LotteryIndex` (r:1 w:1) + /// Proof: `Lottery::LotteryIndex` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn on_initialize_repeat() -> Weight { // Proof Size summary in bytes: - // Measured: `558` + // Measured: `591` // Estimated: `6196` - // Minimum execution time: 78_731_000 picoseconds. - Weight::from_parts(80_248_000, 6196) + // Minimum execution time: 59_376_000 picoseconds. + Weight::from_parts(60_598_000, 6196) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } diff --git a/substrate/frame/membership/src/lib.rs b/substrate/frame/membership/src/lib.rs index f32263970038d967a5e8ea697d4a9ff3ef9d26ca..f4ac697130de87ea48fdb6be522289c6368d10c8 100644 --- a/substrate/frame/membership/src/lib.rs +++ b/substrate/frame/membership/src/lib.rs @@ -46,7 +46,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); #[pallet::pallet] diff --git a/substrate/frame/membership/src/weights.rs b/substrate/frame/membership/src/weights.rs index 2d18848b89ab5b55c22235282fb78b514f7ed937..f21867d687079a7dd5396320e621d26ee2af8996 100644 --- a/substrate/frame/membership/src/weights.rs +++ b/substrate/frame/membership/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_membership +//! 
Autogenerated weights for `pallet_membership` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/membership/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/membership/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_membership. +/// Weight functions needed for `pallet_membership`. pub trait WeightInfo { fn add_member(m: u32, ) -> Weight; fn remove_member(m: u32, ) -> Weight; @@ -61,299 +60,299 @@ pub trait WeightInfo { fn clear_prime() -> Weight; } -/// Weights for pallet_membership using the Substrate node and recommended hardware. +/// Weights for `pallet_membership` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Members` (r:1 w:1) + /// Proof: `TechnicalMembership::Members` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Proposals` (r:1 w:0) + /// Proof: `TechnicalCommittee::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalCommittee::Members` (r:0 w:1) + /// Proof: `TechnicalCommittee::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[1, 99]`. 
fn add_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `208 + m * (64 ±0)` + // Measured: `207 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 17_040_000 picoseconds. - Weight::from_parts(18_344_571, 4687) - // Standard Error: 847 - .saturating_add(Weight::from_parts(50_842, 0).saturating_mul(m.into())) + // Minimum execution time: 12_126_000 picoseconds. + Weight::from_parts(13_085_583, 4687) + // Standard Error: 659 + .saturating_add(Weight::from_parts(36_103, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalMembership Prime (r:1 w:0) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Members` (r:1 w:1) + /// Proof: `TechnicalMembership::Members` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Proposals` (r:1 w:0) + /// Proof: `TechnicalCommittee::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalMembership::Prime` (r:1 w:0) + /// Proof: `TechnicalMembership::Prime` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Members` (r:0 w:1) + /// Proof: `TechnicalCommittee::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[2, 100]`. fn remove_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `312 + m * (64 ±0)` + // Measured: `311 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 20_088_000 picoseconds. - Weight::from_parts(21_271_384, 4687) - // Standard Error: 786 - .saturating_add(Weight::from_parts(44_806, 0).saturating_mul(m.into())) + // Minimum execution time: 14_571_000 picoseconds. 
+ Weight::from_parts(15_532_232, 4687) + // Standard Error: 531 + .saturating_add(Weight::from_parts(35_757, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalMembership Prime (r:1 w:0) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Members` (r:1 w:1) + /// Proof: `TechnicalMembership::Members` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Proposals` (r:1 w:0) + /// Proof: `TechnicalCommittee::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalMembership::Prime` (r:1 w:0) + /// Proof: `TechnicalMembership::Prime` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Members` (r:0 w:1) + /// Proof: `TechnicalCommittee::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[2, 100]`. fn swap_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `312 + m * (64 ±0)` + // Measured: `311 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 20_308_000 picoseconds. - Weight::from_parts(21_469_843, 4687) - // Standard Error: 782 - .saturating_add(Weight::from_parts(56_893, 0).saturating_mul(m.into())) + // Minimum execution time: 14_833_000 picoseconds. 
+ Weight::from_parts(15_657_084, 4687) + // Standard Error: 650 + .saturating_add(Weight::from_parts(44_467, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalMembership Prime (r:1 w:0) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Members` (r:1 w:1) + /// Proof: `TechnicalMembership::Members` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Proposals` (r:1 w:0) + /// Proof: `TechnicalCommittee::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalMembership::Prime` (r:1 w:0) + /// Proof: `TechnicalMembership::Prime` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Members` (r:0 w:1) + /// Proof: `TechnicalCommittee::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[1, 100]`. fn reset_members(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `312 + m * (64 ±0)` + // Measured: `311 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 19_464_000 picoseconds. - Weight::from_parts(21_223_702, 4687) - // Standard Error: 1_068 - .saturating_add(Weight::from_parts(165_438, 0).saturating_mul(m.into())) + // Minimum execution time: 14_629_000 picoseconds. 
+ Weight::from_parts(15_578_203, 4687) + // Standard Error: 910 + .saturating_add(Weight::from_parts(145_101, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalMembership Prime (r:1 w:1) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Members` (r:1 w:1) + /// Proof: `TechnicalMembership::Members` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Proposals` (r:1 w:0) + /// Proof: `TechnicalCommittee::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalMembership::Prime` (r:1 w:1) + /// Proof: `TechnicalMembership::Prime` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Members` (r:0 w:1) + /// Proof: `TechnicalCommittee::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[1, 100]`. fn change_key(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `312 + m * (64 ±0)` + // Measured: `311 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 20_965_000 picoseconds. - Weight::from_parts(22_551_007, 4687) - // Standard Error: 860 - .saturating_add(Weight::from_parts(52_397, 0).saturating_mul(m.into())) + // Minimum execution time: 15_218_000 picoseconds. 
+ Weight::from_parts(16_388_690, 4687) + // Standard Error: 626 + .saturating_add(Weight::from_parts(46_204, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: TechnicalMembership Members (r:1 w:0) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalMembership Prime (r:0 w:1) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Members` (r:1 w:0) + /// Proof: `TechnicalMembership::Members` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `TechnicalMembership::Prime` (r:0 w:1) + /// Proof: `TechnicalMembership::Prime` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[1, 100]`. fn set_prime(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `32 + m * (32 ±0)` + // Measured: `31 + m * (32 ±0)` // Estimated: `4687 + m * (32 ±0)` - // Minimum execution time: 7_481_000 picoseconds. - Weight::from_parts(7_959_053, 4687) - // Standard Error: 364 - .saturating_add(Weight::from_parts(18_653, 0).saturating_mul(m.into())) + // Minimum execution time: 5_954_000 picoseconds. + Weight::from_parts(6_544_638, 4687) + // Standard Error: 346 + .saturating_add(Weight::from_parts(17_638, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } - /// Storage: TechnicalMembership Prime (r:0 w:1) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Prime` (r:0 w:1) + /// Proof: `TechnicalMembership::Prime` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn clear_prime() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_373_000 picoseconds. - Weight::from_parts(3_750_452, 0) + // Minimum execution time: 2_569_000 picoseconds. + Weight::from_parts(2_776_000, 0) .saturating_add(T::DbWeight::get().writes(2_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Members` (r:1 w:1) + /// Proof: `TechnicalMembership::Members` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Proposals` (r:1 w:0) + /// Proof: `TechnicalCommittee::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalCommittee::Members` (r:0 w:1) + /// Proof: `TechnicalCommittee::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[1, 99]`. fn add_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `208 + m * (64 ±0)` + // Measured: `207 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 17_040_000 picoseconds. - Weight::from_parts(18_344_571, 4687) - // Standard Error: 847 - .saturating_add(Weight::from_parts(50_842, 0).saturating_mul(m.into())) + // Minimum execution time: 12_126_000 picoseconds. 
+ Weight::from_parts(13_085_583, 4687) + // Standard Error: 659 + .saturating_add(Weight::from_parts(36_103, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalMembership Prime (r:1 w:0) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Members` (r:1 w:1) + /// Proof: `TechnicalMembership::Members` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Proposals` (r:1 w:0) + /// Proof: `TechnicalCommittee::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalMembership::Prime` (r:1 w:0) + /// Proof: `TechnicalMembership::Prime` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Members` (r:0 w:1) + /// Proof: `TechnicalCommittee::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[2, 100]`. fn remove_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `312 + m * (64 ±0)` + // Measured: `311 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 20_088_000 picoseconds. - Weight::from_parts(21_271_384, 4687) - // Standard Error: 786 - .saturating_add(Weight::from_parts(44_806, 0).saturating_mul(m.into())) + // Minimum execution time: 14_571_000 picoseconds. 
+ Weight::from_parts(15_532_232, 4687) + // Standard Error: 531 + .saturating_add(Weight::from_parts(35_757, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalMembership Prime (r:1 w:0) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Members` (r:1 w:1) + /// Proof: `TechnicalMembership::Members` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Proposals` (r:1 w:0) + /// Proof: `TechnicalCommittee::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalMembership::Prime` (r:1 w:0) + /// Proof: `TechnicalMembership::Prime` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Members` (r:0 w:1) + /// Proof: `TechnicalCommittee::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[2, 100]`. fn swap_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `312 + m * (64 ±0)` + // Measured: `311 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 20_308_000 picoseconds. - Weight::from_parts(21_469_843, 4687) - // Standard Error: 782 - .saturating_add(Weight::from_parts(56_893, 0).saturating_mul(m.into())) + // Minimum execution time: 14_833_000 picoseconds. 
+ Weight::from_parts(15_657_084, 4687) + // Standard Error: 650 + .saturating_add(Weight::from_parts(44_467, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalMembership Prime (r:1 w:0) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Members` (r:1 w:1) + /// Proof: `TechnicalMembership::Members` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Proposals` (r:1 w:0) + /// Proof: `TechnicalCommittee::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalMembership::Prime` (r:1 w:0) + /// Proof: `TechnicalMembership::Prime` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Members` (r:0 w:1) + /// Proof: `TechnicalCommittee::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[1, 100]`. fn reset_members(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `312 + m * (64 ±0)` + // Measured: `311 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 19_464_000 picoseconds. - Weight::from_parts(21_223_702, 4687) - // Standard Error: 1_068 - .saturating_add(Weight::from_parts(165_438, 0).saturating_mul(m.into())) + // Minimum execution time: 14_629_000 picoseconds. 
+ Weight::from_parts(15_578_203, 4687) + // Standard Error: 910 + .saturating_add(Weight::from_parts(145_101, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalMembership Prime (r:1 w:1) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Members` (r:1 w:1) + /// Proof: `TechnicalMembership::Members` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Proposals` (r:1 w:0) + /// Proof: `TechnicalCommittee::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalMembership::Prime` (r:1 w:1) + /// Proof: `TechnicalMembership::Prime` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Members` (r:0 w:1) + /// Proof: `TechnicalCommittee::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[1, 100]`. fn change_key(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `312 + m * (64 ±0)` + // Measured: `311 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 20_965_000 picoseconds. - Weight::from_parts(22_551_007, 4687) - // Standard Error: 860 - .saturating_add(Weight::from_parts(52_397, 0).saturating_mul(m.into())) + // Minimum execution time: 15_218_000 picoseconds. 
+ Weight::from_parts(16_388_690, 4687) + // Standard Error: 626 + .saturating_add(Weight::from_parts(46_204, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: TechnicalMembership Members (r:1 w:0) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalMembership Prime (r:0 w:1) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Members` (r:1 w:0) + /// Proof: `TechnicalMembership::Members` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `TechnicalMembership::Prime` (r:0 w:1) + /// Proof: `TechnicalMembership::Prime` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[1, 100]`. fn set_prime(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `32 + m * (32 ±0)` + // Measured: `31 + m * (32 ±0)` // Estimated: `4687 + m * (32 ±0)` - // Minimum execution time: 7_481_000 picoseconds. - Weight::from_parts(7_959_053, 4687) - // Standard Error: 364 - .saturating_add(Weight::from_parts(18_653, 0).saturating_mul(m.into())) + // Minimum execution time: 5_954_000 picoseconds. + Weight::from_parts(6_544_638, 4687) + // Standard Error: 346 + .saturating_add(Weight::from_parts(17_638, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } - /// Storage: TechnicalMembership Prime (r:0 w:1) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Prime` (r:0 w:1) + /// Proof: `TechnicalMembership::Prime` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn clear_prime() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_373_000 picoseconds. - Weight::from_parts(3_750_452, 0) + // Minimum execution time: 2_569_000 picoseconds. 
+ Weight::from_parts(2_776_000, 0) .saturating_add(RocksDbWeight::get().writes(2_u64)) } } diff --git a/substrate/frame/message-queue/src/integration_test.rs b/substrate/frame/message-queue/src/integration_test.rs index cc3da6ebdc669e5710867aee63252a4bf5ca6889..ce8eb80805ab0401bf7f4ff4e2448983333c7b36 100644 --- a/substrate/frame/message-queue/src/integration_test.rs +++ b/substrate/frame/message-queue/src/integration_test.rs @@ -330,6 +330,11 @@ fn process_some_messages(num_msgs: u32) { ServiceWeight::set(Some(weight)); let consumed = next_block(); + for origin in BookStateFor::::iter_keys() { + let fp = MessageQueue::footprint(origin); + assert_eq!(fp.pages, fp.ready_pages); + } + assert_eq!(consumed, weight, "\n{}", MessageQueue::debug_info()); assert_eq!(NumMessagesProcessed::take(), num_msgs as usize); } diff --git a/substrate/frame/message-queue/src/lib.rs b/substrate/frame/message-queue/src/lib.rs index 07eb0041985342522a113339769b98f834ef4a17..61028057394f6f3bedda173288fa1c9aea3e9e9a 100644 --- a/substrate/frame/message-queue/src/lib.rs +++ b/substrate/frame/message-queue/src/lib.rs @@ -208,8 +208,9 @@ use frame_support::{ defensive, pallet_prelude::*, traits::{ - Defensive, DefensiveTruncateFrom, EnqueueMessage, ExecuteOverweightError, Footprint, - ProcessMessage, ProcessMessageError, QueueFootprint, QueuePausedQuery, ServiceQueues, + Defensive, DefensiveSaturating, DefensiveTruncateFrom, EnqueueMessage, + ExecuteOverweightError, Footprint, ProcessMessage, ProcessMessageError, QueueFootprint, + QueuePausedQuery, ServiceQueues, }, BoundedSlice, CloneNoBound, DefaultNoBound, }; @@ -442,6 +443,7 @@ impl From> for QueueFootprint { fn from(book: BookState) -> Self { QueueFootprint { pages: book.count, + ready_pages: book.end.defensive_saturating_sub(book.begin), storage: Footprint { count: book.message_count, size: book.size }, } } @@ -1281,6 +1283,9 @@ impl Pallet { ensure!(book.message_count < 1 << 30, "Likely overflow or corruption"); ensure!(book.size < 1 << 30, "Likely overflow or corruption"); ensure!(book.count < 1 << 30, "Likely overflow or corruption"); + + let fp: QueueFootprint = book.into(); + ensure!(fp.ready_pages <= fp.pages, "There cannot be more ready than total pages"); } //loop around this origin diff --git a/substrate/frame/message-queue/src/mock.rs b/substrate/frame/message-queue/src/mock.rs index a46fa31df3e20d302650e68a26d8bcb96e5496a2..d176a981ca8ee2f6911176b1a25907c7bc197566 100644 --- a/substrate/frame/message-queue/src/mock.rs +++ b/substrate/frame/message-queue/src/mock.rs @@ -355,8 +355,8 @@ pub fn num_overweight_enqueued_events() -> u32 { .count() as u32 } -pub fn fp(pages: u32, count: u64, size: u64) -> QueueFootprint { - QueueFootprint { storage: Footprint { count, size }, pages } +pub fn fp(pages: u32, ready_pages: u32, count: u64, size: u64) -> QueueFootprint { + QueueFootprint { storage: Footprint { count, size }, pages, ready_pages } } /// A random seed that can be overwritten with `MQ_SEED`. 
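The message-queue hunks above give `QueueFootprint` a new `ready_pages` field, derived from the book's `begin`/`end` page window, and widen the `fp()` test helper to match; the tests in the next file then assert `fp(pages, ready_pages, count, size)`. A minimal standalone sketch (not from the patch) of that accounting, with a simplified stand-in for `BookState` whose field meanings are inferred from the hunks above:

```rust
// Simplified stand-in for the pallet's `BookState`; only the fields used by the
// footprint conversion in the hunk above are modelled here.
struct Book {
    begin: u32, // first page that can still be serviced
    end: u32,   // one past the last page that can still be serviced
    count: u32, // total number of pages, including overweight-only ones
}

// Mirrors `ready_pages: book.end.defensive_saturating_sub(book.begin)`:
// saturating so an inconsistent book cannot underflow.
fn ready_pages(book: &Book) -> u32 {
    book.end.saturating_sub(book.begin)
}

fn main() {
    // Two pages enqueued, both still ready: matches `fp(2, 2, 2, 16)` in the tests below.
    let fresh = Book { begin: 0, end: 2, count: 2 };
    assert_eq!(ready_pages(&fresh), 2);

    // After both pages were marked overweight, `ready_pages` drops to 0 while the
    // total page count stays at 2, matching `fp(2, 0, 2, 16)`.
    let stale = Book { begin: 2, end: 2, count: 2 };
    assert_eq!(ready_pages(&stale), 0);

    // The sanity check added in `lib.rs` and the integration test: never more
    // ready pages than total pages.
    for book in [&fresh, &stale] {
        assert!(ready_pages(book) <= book.count);
    }
}
```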
diff --git a/substrate/frame/message-queue/src/tests.rs b/substrate/frame/message-queue/src/tests.rs index 86a8b79fe8bd01fc5f906ef42e22339c669e57b5..dbef94b3b10354a8b503b795bd46bf90010bb80f 100644 --- a/substrate/frame/message-queue/src/tests.rs +++ b/substrate/frame/message-queue/src/tests.rs @@ -1064,13 +1064,13 @@ fn footprint_num_pages_works() { MessageQueue::enqueue_message(msg("weight=2"), Here); MessageQueue::enqueue_message(msg("weight=3"), Here); - assert_eq!(MessageQueue::footprint(Here), fp(2, 2, 16)); + assert_eq!(MessageQueue::footprint(Here), fp(2, 2, 2, 16)); // Mark the messages as overweight. assert_eq!(MessageQueue::service_queues(1.into_weight()), 0.into_weight()); assert_eq!(System::events().len(), 2); - // Overweight does not change the footprint. - assert_eq!(MessageQueue::footprint(Here), fp(2, 2, 16)); + // `ready_pages` decreases but `page` count does not. + assert_eq!(MessageQueue::footprint(Here), fp(2, 0, 2, 16)); // Now execute the second message. assert_eq!( @@ -1078,7 +1078,7 @@ fn footprint_num_pages_works() { .unwrap(), 3.into_weight() ); - assert_eq!(MessageQueue::footprint(Here), fp(1, 1, 8)); + assert_eq!(MessageQueue::footprint(Here), fp(1, 0, 1, 8)); // And the first one: assert_eq!( ::execute_overweight(2.into_weight(), (Here, 0, 0)) @@ -1086,6 +1086,11 @@ fn footprint_num_pages_works() { 2.into_weight() ); assert_eq!(MessageQueue::footprint(Here), Default::default()); + assert_eq!(MessageQueue::footprint(Here), fp(0, 0, 0, 0)); + + // `ready_pages` and normal `pages` increases again: + MessageQueue::enqueue_message(msg("weight=3"), Here); + assert_eq!(MessageQueue::footprint(Here), fp(1, 1, 1, 8)); }) } diff --git a/substrate/frame/message-queue/src/weights.rs b/substrate/frame/message-queue/src/weights.rs index e86f23e274ff246aa9f557626e06a42a4930154c..001e2834e1cc619b48500035c4505d453707dc03 100644 --- a/substrate/frame/message-queue/src/weights.rs +++ b/substrate/frame/message-queue/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_message_queue +//! Autogenerated weights for `pallet_message_queue` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/message-queue/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/message-queue/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_message_queue. +/// Weight functions needed for `pallet_message_queue`. pub trait WeightInfo { fn ready_ring_knit() -> Weight; fn ready_ring_unknit() -> Weight; @@ -64,246 +63,256 @@ pub trait WeightInfo { fn execute_overweight_page_updated() -> Weight; } -/// Weights for pallet_message_queue using the Substrate node and recommended hardware. +/// Weights for `pallet_message_queue` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: MessageQueue ServiceHead (r:1 w:0) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:0) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn ready_ring_knit() -> Weight { // Proof Size summary in bytes: - // Measured: `267` + // Measured: `301` // Estimated: `6038` - // Minimum execution time: 12_025_000 picoseconds. - Weight::from_parts(12_597_000, 6038) + // Minimum execution time: 11_563_000 picoseconds. + Weight::from_parts(11_956_000, 6038) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn ready_ring_unknit() -> Weight { // Proof Size summary in bytes: - // Measured: `267` + // Measured: `301` // Estimated: `6038` - // Minimum execution time: 11_563_000 picoseconds. - Weight::from_parts(11_785_000, 6038) + // Minimum execution time: 10_263_000 picoseconds. 
+ Weight::from_parts(10_638_000, 6038) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn service_queue_base() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3514` - // Minimum execution time: 4_467_000 picoseconds. - Weight::from_parts(4_655_000, 3514) + // Minimum execution time: 4_335_000 picoseconds. + Weight::from_parts(4_552_000, 3514) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn service_page_base_completion() -> Weight { // Proof Size summary in bytes: // Measured: `147` // Estimated: `69049` - // Minimum execution time: 6_103_000 picoseconds. - Weight::from_parts(6_254_000, 69049) + // Minimum execution time: 6_016_000 picoseconds. + Weight::from_parts(6_224_000, 69049) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn service_page_base_no_completion() -> Weight { // Proof Size summary in bytes: // Measured: `147` // Estimated: `69049` - // Minimum execution time: 6_320_000 picoseconds. - Weight::from_parts(6_565_000, 69049) + // Minimum execution time: 6_183_000 picoseconds. + Weight::from_parts(6_348_000, 69049) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + /// Storage: `MessageQueue::BookStateFor` (r:0 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn service_page_item() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 66_062_000 picoseconds. - Weight::from_parts(66_371_000, 0) + // Minimum execution time: 112_864_000 picoseconds. 
+ Weight::from_parts(114_269_000, 0) + .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn bump_service_head() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `246` // Estimated: `3514` - // Minimum execution time: 6_788_000 picoseconds. - Weight::from_parts(7_176_000, 3514) + // Minimum execution time: 6_665_000 picoseconds. + Weight::from_parts(7_108_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn reap_page() -> Weight { // Proof Size summary in bytes: // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 52_865_000 picoseconds. - Weight::from_parts(54_398_000, 69049) + // Minimum execution time: 51_420_000 picoseconds. + Weight::from_parts(52_252_000, 69049) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn execute_overweight_page_removed() -> Weight { // Proof Size summary in bytes: // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 69_168_000 picoseconds. - Weight::from_parts(70_560_000, 69049) + // Minimum execution time: 71_195_000 picoseconds. 
+ Weight::from_parts(72_981_000, 69049) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn execute_overweight_page_updated() -> Weight { // Proof Size summary in bytes: // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 80_947_000 picoseconds. - Weight::from_parts(82_715_000, 69049) + // Minimum execution time: 83_238_000 picoseconds. + Weight::from_parts(84_422_000, 69049) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: MessageQueue ServiceHead (r:1 w:0) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:0) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn ready_ring_knit() -> Weight { // Proof Size summary in bytes: - // Measured: `267` + // Measured: `301` // Estimated: `6038` - // Minimum execution time: 12_025_000 picoseconds. - Weight::from_parts(12_597_000, 6038) + // Minimum execution time: 11_563_000 picoseconds. + Weight::from_parts(11_956_000, 6038) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn ready_ring_unknit() -> Weight { // Proof Size summary in bytes: - // Measured: `267` + // Measured: `301` // Estimated: `6038` - // Minimum execution time: 11_563_000 picoseconds. - Weight::from_parts(11_785_000, 6038) + // Minimum execution time: 10_263_000 picoseconds. 
+ Weight::from_parts(10_638_000, 6038) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn service_queue_base() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3514` - // Minimum execution time: 4_467_000 picoseconds. - Weight::from_parts(4_655_000, 3514) + // Minimum execution time: 4_335_000 picoseconds. + Weight::from_parts(4_552_000, 3514) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn service_page_base_completion() -> Weight { // Proof Size summary in bytes: // Measured: `147` // Estimated: `69049` - // Minimum execution time: 6_103_000 picoseconds. - Weight::from_parts(6_254_000, 69049) + // Minimum execution time: 6_016_000 picoseconds. + Weight::from_parts(6_224_000, 69049) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn service_page_base_no_completion() -> Weight { // Proof Size summary in bytes: // Measured: `147` // Estimated: `69049` - // Minimum execution time: 6_320_000 picoseconds. - Weight::from_parts(6_565_000, 69049) + // Minimum execution time: 6_183_000 picoseconds. + Weight::from_parts(6_348_000, 69049) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } + /// Storage: `MessageQueue::BookStateFor` (r:0 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn service_page_item() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 66_062_000 picoseconds. - Weight::from_parts(66_371_000, 0) + // Minimum execution time: 112_864_000 picoseconds. 
+ Weight::from_parts(114_269_000, 0) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn bump_service_head() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `246` // Estimated: `3514` - // Minimum execution time: 6_788_000 picoseconds. - Weight::from_parts(7_176_000, 3514) + // Minimum execution time: 6_665_000 picoseconds. + Weight::from_parts(7_108_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn reap_page() -> Weight { // Proof Size summary in bytes: // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 52_865_000 picoseconds. - Weight::from_parts(54_398_000, 69049) + // Minimum execution time: 51_420_000 picoseconds. + Weight::from_parts(52_252_000, 69049) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn execute_overweight_page_removed() -> Weight { // Proof Size summary in bytes: // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 69_168_000 picoseconds. - Weight::from_parts(70_560_000, 69049) + // Minimum execution time: 71_195_000 picoseconds. 
+ Weight::from_parts(72_981_000, 69049) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn execute_overweight_page_updated() -> Weight { // Proof Size summary in bytes: // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 80_947_000 picoseconds. - Weight::from_parts(82_715_000, 69049) + // Minimum execution time: 83_238_000 picoseconds. + Weight::from_parts(84_422_000, 69049) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/substrate/frame/migrations/Cargo.toml b/substrate/frame/migrations/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..2914aa9ea973eafc3eee5419b3b8d8562b23c4f4 --- /dev/null +++ b/substrate/frame/migrations/Cargo.toml @@ -0,0 +1,64 @@ +[package] +name = "pallet-migrations" +version = "1.0.0" +description = "FRAME pallet to execute multi-block migrations." +authors.workspace = true +edition.workspace = true +license.workspace = true +repository.workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +docify = "0.1.14" +impl-trait-for-tuples = "0.2.2" +log = "0.4.18" +scale-info = { version = "2.0.0", default-features = false, features = ["derive"] } + +frame-benchmarking = { default-features = false, optional = true, path = "../benchmarking" } +frame-support = { default-features = false, path = "../support" } +frame-system = { default-features = false, path = "../system" } +sp-core = { path = "../../primitives/core", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } + +[dev-dependencies] +frame-executive = { path = "../executive" } +sp-api = { path = "../../primitives/api", features = ["std"] } +sp-block-builder = { path = "../../primitives/block-builder", features = ["std"] } +sp-io = { path = "../../primitives/io", features = ["std"] } +sp-tracing = { path = "../../primitives/tracing", features = ["std"] } +sp-version = { path = "../../primitives/version", features = ["std"] } + +pretty_assertions = "1.3.0" + +[features] +default = ["std"] + +std = [ + "codec/std", + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "log/std", + "scale-info/std", + "sp-core/std", + "sp-runtime/std", + "sp-std/std", +] + +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] + +try-runtime = [ + "frame-executive/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime", +] diff --git 
a/substrate/frame/migrations/src/benchmarking.rs b/substrate/frame/migrations/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..8ad1fa50d14985842051c9ac6fb3f95e30bdb030 --- /dev/null +++ b/substrate/frame/migrations/src/benchmarking.rs @@ -0,0 +1,222 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; + +use frame_benchmarking::{v2::*, BenchmarkError}; +use frame_system::{Pallet as System, RawOrigin}; +use sp_runtime::traits::One; + +fn assert_last_event(generic_event: ::RuntimeEvent) { + frame_system::Pallet::::assert_last_event(generic_event.into()); +} + +#[benchmarks] +mod benches { + use super::*; + use frame_support::traits::Hooks; + + #[benchmark] + fn onboard_new_mbms() { + T::Migrations::set_fail_after(0); // Should not be called anyway. + assert!(!Cursor::::exists()); + + #[block] + { + Pallet::::onboard_new_mbms(); + } + + assert_last_event::(Event::UpgradeStarted { migrations: 1 }.into()); + } + + #[benchmark] + fn progress_mbms_none() { + T::Migrations::set_fail_after(0); // Should not be called anyway. + assert!(!Cursor::::exists()); + + #[block] + { + Pallet::::progress_mbms(One::one()); + } + } + + /// All migrations completed. + #[benchmark] + fn exec_migration_completed() -> Result<(), BenchmarkError> { + T::Migrations::set_fail_after(0); // Should not be called anyway. + assert_eq!(T::Migrations::len(), 1, "Setup failed"); + let c = ActiveCursor { index: 1, inner_cursor: None, started_at: 0u32.into() }; + let mut meter = WeightMeter::with_limit(T::MaxServiceWeight::get()); + System::::set_block_number(1u32.into()); + + #[block] + { + Pallet::::exec_migration(c, false, &mut meter); + } + + assert_last_event::(Event::UpgradeCompleted {}.into()); + + Ok(()) + } + + /// No migration runs since it is skipped as historic. + #[benchmark] + fn exec_migration_skipped_historic() -> Result<(), BenchmarkError> { + T::Migrations::set_fail_after(0); // Should not be called anyway. + assert_eq!(T::Migrations::len(), 1, "Setup failed"); + let c = ActiveCursor { index: 0, inner_cursor: None, started_at: 0u32.into() }; + + let id: IdentifierOf = T::Migrations::nth_id(0).unwrap().try_into().unwrap(); + Historic::::insert(id, ()); + + let mut meter = WeightMeter::with_limit(T::MaxServiceWeight::get()); + System::::set_block_number(1u32.into()); + + #[block] + { + Pallet::::exec_migration(c, false, &mut meter); + } + + assert_last_event::(Event::MigrationSkipped { index: 0 }.into()); + + Ok(()) + } + + /// Advance a migration by one step. 
+ #[benchmark] + fn exec_migration_advance() -> Result<(), BenchmarkError> { + T::Migrations::set_success_after(1); + assert_eq!(T::Migrations::len(), 1, "Setup failed"); + let c = ActiveCursor { index: 0, inner_cursor: None, started_at: 0u32.into() }; + let mut meter = WeightMeter::with_limit(T::MaxServiceWeight::get()); + System::::set_block_number(1u32.into()); + + #[block] + { + Pallet::::exec_migration(c, false, &mut meter); + } + + assert_last_event::(Event::MigrationAdvanced { index: 0, took: One::one() }.into()); + + Ok(()) + } + + /// Successfully complete a migration. + #[benchmark] + fn exec_migration_complete() -> Result<(), BenchmarkError> { + T::Migrations::set_success_after(0); + assert_eq!(T::Migrations::len(), 1, "Setup failed"); + let c = ActiveCursor { index: 0, inner_cursor: None, started_at: 0u32.into() }; + let mut meter = WeightMeter::with_limit(T::MaxServiceWeight::get()); + System::::set_block_number(1u32.into()); + + #[block] + { + Pallet::::exec_migration(c, false, &mut meter); + } + + assert_last_event::(Event::MigrationCompleted { index: 0, took: One::one() }.into()); + + Ok(()) + } + + #[benchmark] + fn exec_migration_fail() -> Result<(), BenchmarkError> { + T::Migrations::set_fail_after(0); + assert_eq!(T::Migrations::len(), 1, "Setup failed"); + let c = ActiveCursor { index: 0, inner_cursor: None, started_at: 0u32.into() }; + let mut meter = WeightMeter::with_limit(T::MaxServiceWeight::get()); + System::::set_block_number(1u32.into()); + + #[block] + { + Pallet::::exec_migration(c, false, &mut meter); + } + + assert_last_event::(Event::UpgradeFailed {}.into()); + + Ok(()) + } + + #[benchmark] + fn on_init_loop() { + T::Migrations::set_fail_after(0); // Should not be called anyway. + System::::set_block_number(1u32.into()); + Pallet::::on_runtime_upgrade(); + + #[block] + { + Pallet::::on_initialize(1u32.into()); + } + } + + #[benchmark] + fn force_set_cursor() { + #[extrinsic_call] + _(RawOrigin::Root, Some(cursor::())); + } + + #[benchmark] + fn force_set_active_cursor() { + #[extrinsic_call] + _(RawOrigin::Root, 0, None, None); + } + + #[benchmark] + fn force_onboard_mbms() { + #[extrinsic_call] + _(RawOrigin::Root); + } + + #[benchmark] + fn clear_historic(n: Linear<0, { DEFAULT_HISTORIC_BATCH_CLEAR_SIZE * 2 }>) { + let id_max_len = ::IdentifierMaxLen::get(); + assert!(id_max_len >= 4, "Precondition violated"); + + for i in 0..DEFAULT_HISTORIC_BATCH_CLEAR_SIZE * 2 { + let id = IdentifierOf::::truncate_from( + i.encode().into_iter().cycle().take(id_max_len as usize).collect::>(), + ); + + Historic::::insert(&id, ()); + } + + #[extrinsic_call] + _( + RawOrigin::Root, + HistoricCleanupSelector::Wildcard { limit: n.into(), previous_cursor: None }, + ); + } + + fn cursor() -> CursorOf { + // Note: The weight of a function can depend on the weight of reading the `inner_cursor`. + // `Cursor` is a user provided type. Now instead of requiring something like `Cursor: + // From`, we instead rely on the fact that it is MEL and the PoV benchmarking will + // therefore already take the MEL bound, even when the cursor in storage is `None`. + MigrationCursor::Active(ActiveCursor { + index: u32::MAX, + inner_cursor: None, + started_at: 0u32.into(), + }) + } + + // Implements a test for each benchmark. Execute with: + // `cargo test -p pallet-migrations --features runtime-benchmarks`. 
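The `exec_migration_*` benchmarks above all hand the call a `WeightMeter` capped at `T::MaxServiceWeight::get()`. Below is a minimal sketch (not from the patch) of that metering pattern: `WeightMeter::with_limit` appears in the hunks above, while `try_consume`/`can_consume` are assumed from `frame_support`'s `WeightMeter` API and the concrete numbers are made up.

```rust
use frame_support::weights::{Weight, WeightMeter};

#[test]
fn metering_sketch() {
    // Cap the meter at whatever the runtime allots for migrations per block.
    let mut meter = WeightMeter::with_limit(Weight::from_parts(10_000_000, 64 * 1024));
    let step_cost = Weight::from_parts(1_000_000, 1_024);

    // A migration step only does a unit of work while the meter still covers it ...
    while meter.try_consume(step_cost).is_ok() {
        // ... migrate one item ...
    }

    // ... and stops (reporting `SteppedMigrationError::InsufficientWeight` if it
    // could not even do a single step) once the remaining weight is exhausted.
    assert!(!meter.can_consume(step_cost));
}
```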
+ impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/substrate/frame/migrations/src/lib.rs b/substrate/frame/migrations/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..cd57d89f440f79d39f98a3da81ed57b6cb5bfcf5 --- /dev/null +++ b/substrate/frame/migrations/src/lib.rs @@ -0,0 +1,746 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![deny(missing_docs)] +#![deny(rustdoc::broken_intra_doc_links)] + +//! # `pallet-migrations` +//! +//! Provides multi block migrations for FRAME runtimes. +//! +//! ## Overview +//! +//! The pallet takes care of executing a batch of multi-step migrations over multiple blocks. The +//! process starts on each runtime upgrade. Normal and operational transactions are paused while +//! migrations are on-going. +//! +//! ### Example +//! +//! This example demonstrates a simple mocked walk through of a basic success scenario. The pallet +//! is configured with two migrations: one succeeding after just one step, and the second one +//! succeeding after two steps. A runtime upgrade is then enacted and the block number is advanced +//! until all migrations finish executing. Afterwards, the recorded historic migrations are +//! checked and events are asserted. +#![doc = docify::embed!("substrate/frame/migrations/src/tests.rs", simple_works)] +//! +//! ## Pallet API +//! +//! See the [`pallet`] module for more information about the interfaces this pallet exposes, +//! including its configuration trait, dispatchables, storage items, events and errors. +//! +//! Otherwise noteworthy API of this pallet include its implementation of the +//! [`MultiStepMigrator`] trait. This must be plugged into +//! [`frame_system::Config::MultiBlockMigrator`] for proper function. +//! +//! The API contains some calls for emergency management. They are all prefixed with `force_` and +//! should normally not be needed. Pay special attention prior to using them. +//! +//! ### Design Goals +//! +//! 1. Must automatically execute migrations over multiple blocks. +//! 2. Must expose information about whether migrations are ongoing. +//! 3. Must respect pessimistic weight bounds of migrations. +//! 4. Must execute migrations in order. Skipping is not allowed; migrations are run on a +//! all-or-nothing basis. +//! 5. Must prevent re-execution of past migrations. +//! 6. Must provide transactional storage semantics for migrations. +//! 7. Must guarantee progress. +//! +//! ### Design +//! +//! Migrations are provided to the pallet through the associated type [`Config::Migrations`] of type +//! [`SteppedMigrations`]. This allows multiple migrations to be aggregated through a tuple. It +//! simplifies the trait bounds since all associated types of the trait must be provided by the +//! pallet. The actual progress of the pallet is stored in the [`Cursor`] storage item. This can +//! 
either be [`MigrationCursor::Active`] or [`MigrationCursor::Stuck`]. In the active case it +//! points to the currently active migration and stores its inner cursor. The inner cursor can then +//! be used by the migration to store its inner state and advance. Each time when the migration +//! returns `Some(cursor)`, it signals the pallet that it is not done yet. +//! The cursor is reset on each runtime upgrade. This ensures that it starts to execute at the +//! first migration in the vector. The pallets cursor is only ever incremented or set to `Stuck` +//! once it encounters an error (Goal 4). Once in the stuck state, the pallet will stay stuck until +//! it is fixed through manual governance intervention. +//! As soon as the cursor of the pallet becomes `Some(_)`; [`MultiStepMigrator::ongoing`] returns +//! `true` (Goal 2). This can be used by upstream code to possibly pause transactions. +//! In `on_initialize` the pallet will load the current migration and check whether it was already +//! executed in the past by checking for membership of its ID in the [`Historic`] set. Historic +//! migrations are skipped without causing an error. Each successfully executed migration is added +//! to this set (Goal 5). +//! This proceeds until no more migrations remain. At that point, the event `UpgradeCompleted` is +//! emitted (Goal 1). +//! The execution of each migration happens by calling [`SteppedMigration::transactional_step`]. +//! This function wraps the inner `step` function into a transactional layer to allow rollback in +//! the error case (Goal 6). +//! Weight limits must be checked by the migration itself. The pallet provides a [`WeightMeter`] for +//! that purpose. The pallet may return [`SteppedMigrationError::InsufficientWeight`] at any point. +//! In that scenario, one of two things will happen: if that migration was exclusively executed +//! in this block, and therefore required more than the maximum amount of weight possible, the +//! process becomes `Stuck`. Otherwise, one re-attempt is executed with the same logic in the next +//! block (Goal 3). Progress through the migrations is guaranteed by providing a timeout for each +//! migration via [`SteppedMigration::max_steps`]. The pallet **ONLY** guarantees progress if this +//! is set to sensible limits (Goal 7). +//! +//! ### Scenario: Governance cleanup +//! +//! Every now and then, governance can make use of the [`clear_historic`][Pallet::clear_historic] +//! call. This ensures that no old migrations pile up in the [`Historic`] set. This can be done very +//! rarely, since the storage should not grow quickly and the lookup weight does not suffer much. +//! Another possibility would be to have a synchronous single-block migration perpetually deployed +//! that cleans them up before the MBMs start. +//! +//! ### Scenario: Successful upgrade +//! +//! The standard procedure for a successful runtime upgrade can look like this: +//! 1. Migrations are configured in the `Migrations` config item. All migrations expose +//! [`max_steps`][SteppedMigration::max_steps], are error tolerant, check their weight bounds and +//! have a unique identifier. +//! 2. The runtime upgrade is enacted. An `UpgradeStarted` event is +//! followed by lots of `MigrationAdvanced` and `MigrationCompleted` events. Finally +//! `UpgradeCompleted` is emitted. +//! 3. Cleanup as described in the governance scenario be executed at any time after the migrations +//! completed. +//! +//! ### Advice: Failed upgrades +//! +//! 
Failed upgrades cannot be recovered from automatically and require governance intervention. Set +//! up monitoring for `UpgradeFailed` events to be made aware of any failures. The hook +//! [`FailedMigrationHandler::failed`] should be setup in a way that it allows governance to act, +//! but still prevent other transactions from interacting with the inconsistent storage state. Note +//! that this is paramount, since the inconsistent state might contain a faulty balance amount or +//! similar that could cause great harm if user transactions don't remain suspended. One way to +//! implement this would be to use the `SafeMode` or `TxPause` pallets that can prevent most user +//! interactions but still allow a whitelisted set of governance calls. +//! +//! ### Remark: Failed migrations +//! +//! Failed migrations are not added to the `Historic` set. This means that an erroneous +//! migration must be removed and fixed manually. This already applies, even before considering the +//! historic set. +//! +//! ### Remark: Transactional processing +//! +//! You can see the transactional semantics for migration steps as mostly useless, since in the +//! stuck case the state is already messed up. This just prevents it from becoming even more messed +//! up, but doesn't prevent it in the first place. + +#![cfg_attr(not(feature = "std"), no_std)] + +mod benchmarking; +mod mock; +pub mod mock_helpers; +mod tests; +pub mod weights; + +pub use pallet::*; +pub use weights::WeightInfo; + +use codec::{Decode, Encode, MaxEncodedLen}; +use core::ops::ControlFlow; +use frame_support::{ + defensive, defensive_assert, + migrations::*, + traits::Get, + weights::{Weight, WeightMeter}, + BoundedVec, +}; +use frame_system::{pallet_prelude::BlockNumberFor, Pallet as System}; +use sp_runtime::Saturating; +use sp_std::vec::Vec; + +/// Points to the next migration to execute. +#[derive(Debug, Clone, Eq, PartialEq, Encode, Decode, scale_info::TypeInfo, MaxEncodedLen)] +pub enum MigrationCursor { + /// Points to the currently active migration and its inner cursor. + Active(ActiveCursor), + + /// Migration got stuck and cannot proceed. This is bad. + Stuck, +} + +impl MigrationCursor { + /// Try to return self as an [`ActiveCursor`]. + pub fn as_active(&self) -> Option<&ActiveCursor> { + match self { + MigrationCursor::Active(active) => Some(active), + MigrationCursor::Stuck => None, + } + } +} + +impl From> + for MigrationCursor +{ + fn from(active: ActiveCursor) -> Self { + MigrationCursor::Active(active) + } +} + +/// Points to the currently active migration and its inner cursor. +#[derive(Debug, Clone, Eq, PartialEq, Encode, Decode, scale_info::TypeInfo, MaxEncodedLen)] +pub struct ActiveCursor { + /// The index of the migration in the MBM tuple. + pub index: u32, + /// The cursor of the migration that is referenced by `index`. + pub inner_cursor: Option, + /// The block number that the migration started at. + /// + /// This is used to calculate how many blocks it took. + pub started_at: BlockNumber, +} + +impl ActiveCursor { + /// Advance the overarching cursor to the next migration. + pub(crate) fn goto_next_migration(&mut self, current_block: BlockNumber) { + self.index.saturating_inc(); + self.inner_cursor = None; + self.started_at = current_block; + } +} + +/// How to clear the records of historic migrations. +#[derive(Debug, Clone, Eq, PartialEq, Encode, Decode, scale_info::TypeInfo)] +pub enum HistoricCleanupSelector { + /// Clear exactly these entries. + /// + /// This is the advised way of doing it. 
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+mod benchmarking;
+mod mock;
+pub mod mock_helpers;
+mod tests;
+pub mod weights;
+
+pub use pallet::*;
+pub use weights::WeightInfo;
+
+use codec::{Decode, Encode, MaxEncodedLen};
+use core::ops::ControlFlow;
+use frame_support::{
+	defensive, defensive_assert,
+	migrations::*,
+	traits::Get,
+	weights::{Weight, WeightMeter},
+	BoundedVec,
+};
+use frame_system::{pallet_prelude::BlockNumberFor, Pallet as System};
+use sp_runtime::Saturating;
+use sp_std::vec::Vec;
+
+/// Points to the next migration to execute.
+#[derive(Debug, Clone, Eq, PartialEq, Encode, Decode, scale_info::TypeInfo, MaxEncodedLen)]
+pub enum MigrationCursor<Cursor, BlockNumber> {
+	/// Points to the currently active migration and its inner cursor.
+	Active(ActiveCursor<Cursor, BlockNumber>),
+
+	/// Migration got stuck and cannot proceed. This is bad.
+	Stuck,
+}
+
+impl<Cursor, BlockNumber> MigrationCursor<Cursor, BlockNumber> {
+	/// Try to return self as an [`ActiveCursor`].
+	pub fn as_active(&self) -> Option<&ActiveCursor<Cursor, BlockNumber>> {
+		match self {
+			MigrationCursor::Active(active) => Some(active),
+			MigrationCursor::Stuck => None,
+		}
+	}
+}
+
+impl<Cursor, BlockNumber> From<ActiveCursor<Cursor, BlockNumber>>
+	for MigrationCursor<Cursor, BlockNumber>
+{
+	fn from(active: ActiveCursor<Cursor, BlockNumber>) -> Self {
+		MigrationCursor::Active(active)
+	}
+}
+
+/// Points to the currently active migration and its inner cursor.
+#[derive(Debug, Clone, Eq, PartialEq, Encode, Decode, scale_info::TypeInfo, MaxEncodedLen)]
+pub struct ActiveCursor<Cursor, BlockNumber> {
+	/// The index of the migration in the MBM tuple.
+	pub index: u32,
+	/// The cursor of the migration that is referenced by `index`.
+	pub inner_cursor: Option<Cursor>,
+	/// The block number that the migration started at.
+	///
+	/// This is used to calculate how many blocks it took.
+	pub started_at: BlockNumber,
+}
+
+impl<Cursor, BlockNumber> ActiveCursor<Cursor, BlockNumber> {
+	/// Advance the overarching cursor to the next migration.
+	pub(crate) fn goto_next_migration(&mut self, current_block: BlockNumber) {
+		self.index.saturating_inc();
+		self.inner_cursor = None;
+		self.started_at = current_block;
+	}
+}
+
+/// How to clear the records of historic migrations.
+#[derive(Debug, Clone, Eq, PartialEq, Encode, Decode, scale_info::TypeInfo)]
+pub enum HistoricCleanupSelector<Id> {
+	/// Clear exactly these entries.
+	///
+	/// This is the advised way of doing it.
+	Specific(Vec<Id>),
+
+	/// Clear up to this many entries.
+	Wildcard {
+		/// How many should be cleared in this call at most.
+		limit: Option<u32>,
+		/// The cursor that was emitted from any previous `HistoricCleared`.
+		///
+		/// Does not need to be passed when clearing the first batch.
+		previous_cursor: Option<Vec<u8>>,
+	},
+}
+
+/// The default number of entries that should be cleared by a `HistoricCleanupSelector::Wildcard`.
+///
+/// The caller can explicitly specify a higher amount. Benchmarks are run with twice this value.
+const DEFAULT_HISTORIC_BATCH_CLEAR_SIZE: u32 = 128;
+
+impl<Id> HistoricCleanupSelector<Id> {
+	/// The maximal number of entries that this will remove.
+	///
+	/// Needed for weight calculation.
+	pub fn limit(&self) -> u32 {
+		match self {
+			Self::Specific(ids) => ids.len() as u32,
+			Self::Wildcard { limit, .. } => limit.unwrap_or(DEFAULT_HISTORIC_BATCH_CLEAR_SIZE),
+		}
+	}
+}
+
+/// Convenience alias for [`MigrationCursor`].
+pub type CursorOf<T> = MigrationCursor<RawCursorOf<T>, BlockNumberFor<T>>;
+
+/// Convenience alias for the raw inner cursor of a migration.
+pub type RawCursorOf<T> = BoundedVec<u8, <T as Config>::CursorMaxLen>;
+
+/// Convenience alias for the identifier of a migration.
+pub type IdentifierOf<T> = BoundedVec<u8, <T as Config>::IdentifierMaxLen>;
+
+/// Convenience alias for [`ActiveCursor`].
+pub type ActiveCursorOf<T> = ActiveCursor<RawCursorOf<T>, BlockNumberFor<T>>;
+
+/// Trait for a tuple of no-op migrations with one element.
+pub trait MockedMigrations: SteppedMigrations {
+	/// The migration should fail after `n` steps.
+	fn set_fail_after(n: u32);
+	/// The migration should succeed after `n` steps.
+	fn set_success_after(n: u32);
+}
+
+#[frame_support::pallet]
+pub mod pallet {
+	use super::*;
+	use frame_support::pallet_prelude::*;
+	use frame_system::pallet_prelude::*;
+
+	#[pallet::pallet]
+	pub struct Pallet<T>(_);
+
+	#[pallet::config]
+	pub trait Config: frame_system::Config {
+		/// The overarching event type of the runtime.
+		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
+
+		/// All the multi-block migrations to run.
+		///
+		/// Should only be updated in a runtime-upgrade once all the old migrations have completed.
+		/// (Check that [`Cursor`] is `None`).
+		#[cfg(not(feature = "runtime-benchmarks"))]
+		type Migrations: SteppedMigrations;
+
+		/// Mocked migrations for benchmarking only.
+		///
+		/// Should be configured to [`crate::mock_helpers::MockedMigrations`] in benchmarks.
+		#[cfg(feature = "runtime-benchmarks")]
+		type Migrations: MockedMigrations;
+
+		/// The maximal length of an encoded cursor.
+		///
+		/// A good default needs to be selected such that no migration will ever have a cursor with
+		/// MEL above this limit. This is statically checked in `integrity_test`.
+		#[pallet::constant]
+		type CursorMaxLen: Get<u32>;
+
+		/// The maximal length of an encoded identifier.
+		///
+		/// A good default needs to be selected such that no migration will ever have an identifier
+		/// with MEL above this limit. This is statically checked in `integrity_test`.
+		#[pallet::constant]
+		type IdentifierMaxLen: Get<u32>;
+
+		/// Notifications for status updates of a runtime upgrade.
+		///
+		/// Could be used to pause XCM etc.
+		type MigrationStatusHandler: MigrationStatusHandler;
+
+		/// Handler for failed migrations.
+		type FailedMigrationHandler: FailedMigrationHandler;
+
+		/// The maximum weight to spend each block to execute migrations.
+		type MaxServiceWeight: Get<Weight>;
+
+		/// Weight information for the calls and functions of this pallet.
+		type WeightInfo: WeightInfo;
+	}
+
+	/// The currently active migration to run and its cursor.
+ /// + /// `None` indicates that no migration is running. + #[pallet::storage] + pub type Cursor = StorageValue<_, CursorOf, OptionQuery>; + + /// Set of all successfully executed migrations. + /// + /// This is used as blacklist, to not re-execute migrations that have not been removed from the + /// codebase yet. Governance can regularly clear this out via `clear_historic`. + #[pallet::storage] + pub type Historic = StorageMap<_, Twox64Concat, IdentifierOf, (), OptionQuery>; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A Runtime upgrade started. + /// + /// Its end is indicated by `UpgradeCompleted` or `UpgradeFailed`. + UpgradeStarted { + /// The number of migrations that this upgrade contains. + /// + /// This can be used to design a progress indicator in combination with counting the + /// `MigrationCompleted` and `MigrationSkipped` events. + migrations: u32, + }, + /// The current runtime upgrade completed. + /// + /// This implies that all of its migrations completed successfully as well. + UpgradeCompleted, + /// Runtime upgrade failed. + /// + /// This is very bad and will require governance intervention. + UpgradeFailed, + /// A migration was skipped since it was already executed in the past. + MigrationSkipped { + /// The index of the skipped migration within the [`Config::Migrations`] list. + index: u32, + }, + /// A migration progressed. + MigrationAdvanced { + /// The index of the migration within the [`Config::Migrations`] list. + index: u32, + /// The number of blocks that this migration took so far. + took: BlockNumberFor, + }, + /// A Migration completed. + MigrationCompleted { + /// The index of the migration within the [`Config::Migrations`] list. + index: u32, + /// The number of blocks that this migration took so far. + took: BlockNumberFor, + }, + /// A Migration failed. + /// + /// This implies that the whole upgrade failed and governance intervention is required. + MigrationFailed { + /// The index of the migration within the [`Config::Migrations`] list. + index: u32, + /// The number of blocks that this migration took so far. + took: BlockNumberFor, + }, + /// The set of historical migrations has been cleared. + HistoricCleared { + /// Should be passed to `clear_historic` in a successive call. + next_cursor: Option>, + }, + } + + #[pallet::error] + pub enum Error { + /// The operation cannot complete since some MBMs are ongoing. + Ongoing, + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_runtime_upgrade() -> Weight { + Self::onboard_new_mbms() + } + + #[cfg(feature = "std")] + fn integrity_test() { + // Check that the migrations tuple is legit. + frame_support::assert_ok!(T::Migrations::integrity_test()); + + // Very important! Ensure that the pallet is configured in `System::Config`. + { + assert!(!Cursor::::exists(), "Externalities storage should be clean"); + assert!(!::MultiBlockMigrator::ongoing()); + + Cursor::::put(MigrationCursor::Stuck); + assert!(::MultiBlockMigrator::ongoing()); + + Cursor::::kill(); + } + + // The per-block service weight is sane. 
+ #[cfg(not(test))] + { + let want = T::MaxServiceWeight::get(); + let max = ::BlockWeights::get().max_block; + + assert!(want.all_lte(max), "Service weight is larger than a block: {want} > {max}",); + } + + // Cursor MEL + { + let mel = T::Migrations::cursor_max_encoded_len(); + let max_mel = T::CursorMaxLen::get() as usize; + assert!( + mel <= max_mel, + "A Cursor is not guaranteed to fit into the storage: {mel} > {max_mel}", + ); + } + + // Identifier MEL + { + let mel = T::Migrations::identifier_max_encoded_len(); + let max_mel = T::IdentifierMaxLen::get() as usize; + assert!( + mel <= max_mel, + "An Identifier is not guaranteed to fit into the storage: {mel} > {max_mel}", + ); + } + } + } + + #[pallet::call(weight = T::WeightInfo)] + impl Pallet { + /// Allows root to set a cursor to forcefully start, stop or forward the migration process. + /// + /// Should normally not be needed and is only in place as emergency measure. Note that + /// restarting the migration process in this manner will not call the + /// [`MigrationStatusHandler::started`] hook or emit an `UpgradeStarted` event. + #[pallet::call_index(0)] + pub fn force_set_cursor( + origin: OriginFor, + cursor: Option>, + ) -> DispatchResult { + ensure_root(origin)?; + + Cursor::::set(cursor); + + Ok(()) + } + + /// Allows root to set an active cursor to forcefully start/forward the migration process. + /// + /// This is an edge-case version of [`Self::force_set_cursor`] that allows to set the + /// `started_at` value to the next block number. Otherwise this would not be possible, since + /// `force_set_cursor` takes an absolute block number. Setting `started_at` to `None` + /// indicates that the current block number plus one should be used. + #[pallet::call_index(1)] + pub fn force_set_active_cursor( + origin: OriginFor, + index: u32, + inner_cursor: Option>, + started_at: Option>, + ) -> DispatchResult { + ensure_root(origin)?; + + let started_at = started_at.unwrap_or( + System::::block_number().saturating_add(sp_runtime::traits::One::one()), + ); + Cursor::::put(MigrationCursor::Active(ActiveCursor { + index, + inner_cursor, + started_at, + })); + + Ok(()) + } + + /// Forces the onboarding of the migrations. + /// + /// This process happens automatically on a runtime upgrade. It is in place as an emergency + /// measurement. The cursor needs to be `None` for this to succeed. + #[pallet::call_index(2)] + pub fn force_onboard_mbms(origin: OriginFor) -> DispatchResult { + ensure_root(origin)?; + + ensure!(!Cursor::::exists(), Error::::Ongoing); + Self::onboard_new_mbms(); + + Ok(()) + } + + /// Clears the `Historic` set. + /// + /// `map_cursor` must be set to the last value that was returned by the + /// `HistoricCleared` event. The first time `None` can be used. `limit` must be chosen in a + /// way that will result in a sensible weight. + #[pallet::call_index(3)] + #[pallet::weight(T::WeightInfo::clear_historic(selector.limit()))] + pub fn clear_historic( + origin: OriginFor, + selector: HistoricCleanupSelector>, + ) -> DispatchResult { + ensure_root(origin)?; + + match &selector { + HistoricCleanupSelector::Specific(ids) => { + for id in ids { + Historic::::remove(id); + } + Self::deposit_event(Event::HistoricCleared { next_cursor: None }); + }, + HistoricCleanupSelector::Wildcard { previous_cursor, .. 
} => { + let next = Historic::::clear(selector.limit(), previous_cursor.as_deref()); + Self::deposit_event(Event::HistoricCleared { next_cursor: next.maybe_cursor }); + }, + } + + Ok(()) + } + } +} + +impl Pallet { + /// Onboard all new Multi-Block-Migrations and start the process of executing them. + /// + /// Should only be called once all previous migrations completed. + fn onboard_new_mbms() -> Weight { + if let Some(cursor) = Cursor::::get() { + log::error!("Ongoing migrations interrupted - chain stuck"); + + let maybe_index = cursor.as_active().map(|c| c.index); + Self::upgrade_failed(maybe_index); + return T::WeightInfo::onboard_new_mbms() + } + + let migrations = T::Migrations::len(); + log::debug!("Onboarding {migrations} new MBM migrations"); + + if migrations > 0 { + // Set the cursor to the first migration: + Cursor::::set(Some( + ActiveCursor { + index: 0, + inner_cursor: None, + started_at: System::::block_number(), + } + .into(), + )); + Self::deposit_event(Event::UpgradeStarted { migrations }); + T::MigrationStatusHandler::started(); + } + + T::WeightInfo::onboard_new_mbms() + } + + /// Tries to make progress on the Multi-Block-Migrations process. + fn progress_mbms(n: BlockNumberFor) -> Weight { + let mut meter = WeightMeter::with_limit(T::MaxServiceWeight::get()); + meter.consume(T::WeightInfo::progress_mbms_none()); + + let mut cursor = match Cursor::::get() { + None => { + log::trace!("[Block {n:?}] Waiting for cursor to become `Some`."); + return meter.consumed() + }, + Some(MigrationCursor::Active(cursor)) => { + log::debug!("Progressing MBM #{}", cursor.index); + cursor + }, + Some(MigrationCursor::Stuck) => { + log::error!("Migration stuck. Governance intervention required."); + return meter.consumed() + }, + }; + debug_assert!(Self::ongoing()); + + // The limit here is a defensive measure to prevent an infinite loop. It expresses that we + // allow no more than 8 MBMs to finish in a single block. This should be harmless, since we + // generally expect *Multi*-Block-Migrations to take *multiple* blocks. + for i in 0..8 { + match Self::exec_migration(cursor, i == 0, &mut meter) { + None => return meter.consumed(), + Some(ControlFlow::Continue(next_cursor)) => { + cursor = next_cursor; + }, + Some(ControlFlow::Break(last_cursor)) => { + cursor = last_cursor; + break + }, + } + } + + Cursor::::set(Some(cursor.into())); + + meter.consumed() + } + + /// Try to make progress on the current migration. + /// + /// Returns whether processing should continue or break for this block. The return value means: + /// - `None`: The migration process is completely finished. + /// - `ControlFlow::Break`: Continue in the *next* block with the given cursor. + /// - `ControlFlow::Continue`: Continue in the *current* block with the given cursor. + fn exec_migration( + mut cursor: ActiveCursorOf, + is_first: bool, + meter: &mut WeightMeter, + ) -> Option, ActiveCursorOf>> { + // The differences between the single branches' weights is not that big. And since we do + // only one step per block, we can just use the maximum instead of more precise accounting. + if meter.try_consume(Self::exec_migration_max_weight()).is_err() { + defensive_assert!(!is_first, "There should be enough weight to do this at least once"); + return Some(ControlFlow::Break(cursor)) + } + + let Some(id) = T::Migrations::nth_id(cursor.index) else { + // No more migrations in the tuple - we are done. 
+ defensive_assert!(cursor.index == T::Migrations::len(), "Inconsistent MBMs tuple"); + Self::deposit_event(Event::UpgradeCompleted); + Cursor::::kill(); + T::MigrationStatusHandler::completed(); + return None; + }; + + let Ok(bounded_id): Result, _> = id.try_into() else { + defensive!("integrity_test ensures that all identifiers' MEL bounds fit into CursorMaxLen; qed."); + Self::upgrade_failed(Some(cursor.index)); + return None + }; + + if Historic::::contains_key(&bounded_id) { + Self::deposit_event(Event::MigrationSkipped { index: cursor.index }); + cursor.goto_next_migration(System::::block_number()); + return Some(ControlFlow::Continue(cursor)) + } + + let max_steps = T::Migrations::nth_max_steps(cursor.index); + let next_cursor = T::Migrations::nth_transactional_step( + cursor.index, + cursor.inner_cursor.clone().map(|c| c.into_inner()), + meter, + ); + let Some((max_steps, next_cursor)) = max_steps.zip(next_cursor) else { + defensive!("integrity_test ensures that the tuple is valid; qed"); + Self::upgrade_failed(Some(cursor.index)); + return None + }; + + let took = System::::block_number().saturating_sub(cursor.started_at); + match next_cursor { + Ok(Some(next_cursor)) => { + let Ok(bound_next_cursor) = next_cursor.try_into() else { + defensive!("The integrity check ensures that all cursors' MEL bound fits into CursorMaxLen; qed"); + Self::upgrade_failed(Some(cursor.index)); + return None + }; + + Self::deposit_event(Event::MigrationAdvanced { index: cursor.index, took }); + cursor.inner_cursor = Some(bound_next_cursor); + + if max_steps.map_or(false, |max| took > max.into()) { + Self::deposit_event(Event::MigrationFailed { index: cursor.index, took }); + Self::upgrade_failed(Some(cursor.index)); + None + } else { + // A migration cannot progress more than one step per block, we therefore break. + Some(ControlFlow::Break(cursor)) + } + }, + Ok(None) => { + // A migration is done when it returns cursor `None`. + Self::deposit_event(Event::MigrationCompleted { index: cursor.index, took }); + Historic::::insert(&bounded_id, ()); + cursor.goto_next_migration(System::::block_number()); + Some(ControlFlow::Continue(cursor)) + }, + Err(SteppedMigrationError::InsufficientWeight { required }) => { + if is_first || required.any_gt(meter.limit()) { + Self::deposit_event(Event::MigrationFailed { index: cursor.index, took }); + Self::upgrade_failed(Some(cursor.index)); + None + } else { + // Retry and hope that there is more weight in the next block. + Some(ControlFlow::Break(cursor)) + } + }, + Err(SteppedMigrationError::InvalidCursor | SteppedMigrationError::Failed) => { + Self::deposit_event(Event::MigrationFailed { index: cursor.index, took }); + Self::upgrade_failed(Some(cursor.index)); + None + }, + } + } + + /// Fail the current runtime upgrade, caused by `migration`. 
+ fn upgrade_failed(migration: Option) { + use FailedMigrationHandling::*; + Self::deposit_event(Event::UpgradeFailed); + + match T::FailedMigrationHandler::failed(migration) { + KeepStuck => Cursor::::set(Some(MigrationCursor::Stuck)), + ForceUnstuck => Cursor::::kill(), + Ignore => {}, + } + } + + fn exec_migration_max_weight() -> Weight { + T::WeightInfo::exec_migration_complete() + .max(T::WeightInfo::exec_migration_completed()) + .max(T::WeightInfo::exec_migration_skipped_historic()) + .max(T::WeightInfo::exec_migration_advance()) + .max(T::WeightInfo::exec_migration_fail()) + } +} + +impl MultiStepMigrator for Pallet { + fn ongoing() -> bool { + Cursor::::exists() + } + + fn step() -> Weight { + Self::progress_mbms(System::::block_number()) + } +} diff --git a/substrate/frame/migrations/src/mock.rs b/substrate/frame/migrations/src/mock.rs new file mode 100644 index 0000000000000000000000000000000000000000..3729264755442a008641668b0a14214764d255b9 --- /dev/null +++ b/substrate/frame/migrations/src/mock.rs @@ -0,0 +1,163 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Mocked runtime for testing the migrations pallet. + +#![cfg(test)] + +use crate::{mock_helpers::*, Event, Historic}; + +use frame_support::{ + derive_impl, + migrations::*, + traits::{OnFinalize, OnInitialize}, + weights::Weight, +}; +use frame_system::EventRecord; +use sp_core::{ConstU32, H256}; + +type Block = frame_system::mocking::MockBlock; + +// Configure a mock runtime to test the pallet. +frame_support::construct_runtime!( + pub enum Test { + System: frame_system, + Migrations: crate, + } +); + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Test { + type Block = Block; + type PalletInfo = PalletInfo; + type MultiBlockMigrator = Migrations; +} + +frame_support::parameter_types! { + pub const MaxServiceWeight: Weight = Weight::MAX.div(10); +} + +impl crate::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Migrations = MockedMigrations; + type CursorMaxLen = ConstU32<65_536>; + type IdentifierMaxLen = ConstU32<256>; + type MigrationStatusHandler = MockedMigrationStatusHandler; + type FailedMigrationHandler = MockedFailedMigrationHandler; + type MaxServiceWeight = MaxServiceWeight; + type WeightInfo = (); +} + +frame_support::parameter_types! { + /// The number of started upgrades. + pub static UpgradesStarted: u32 = 0; + /// The number of completed upgrades. + pub static UpgradesCompleted: u32 = 0; + /// The migrations that failed. + pub static UpgradesFailed: Vec> = vec![]; + /// Return value of [`MockedFailedMigrationHandler::failed`]. + pub static FailedUpgradeResponse: FailedMigrationHandling = FailedMigrationHandling::KeepStuck; +} + +/// Records all started and completed upgrades in `UpgradesStarted` and `UpgradesCompleted`. 
+pub struct MockedMigrationStatusHandler; +impl MigrationStatusHandler for MockedMigrationStatusHandler { + fn started() { + log::info!("MigrationStatusHandler started"); + UpgradesStarted::mutate(|v| *v += 1); + } + + fn completed() { + log::info!("MigrationStatusHandler completed"); + UpgradesCompleted::mutate(|v| *v += 1); + } +} + +/// Records all failed upgrades in `UpgradesFailed`. +pub struct MockedFailedMigrationHandler; +impl FailedMigrationHandler for MockedFailedMigrationHandler { + fn failed(migration: Option) -> FailedMigrationHandling { + UpgradesFailed::mutate(|v| v.push(migration)); + let res = FailedUpgradeResponse::get(); + log::error!("FailedMigrationHandler failed at: {migration:?}, handling as {res:?}"); + res + } +} + +/// Returns the number of `(started, completed, failed)` upgrades and resets their numbers. +pub fn upgrades_started_completed_failed() -> (u32, u32, u32) { + (UpgradesStarted::take(), UpgradesCompleted::take(), UpgradesFailed::take().len() as u32) +} + +/// Build genesis storage according to the mock runtime. +pub fn new_test_ext() -> sp_io::TestExternalities { + sp_io::TestExternalities::new(Default::default()) +} + +/// Run this closure in test externalities. +pub fn test_closure(f: impl FnOnce() -> R) -> R { + let mut ext = new_test_ext(); + ext.execute_with(f) +} + +pub fn run_to_block(n: u32) { + while System::block_number() < n as u64 { + log::debug!("Block {}", System::block_number()); + System::set_block_number(System::block_number() + 1); + System::on_initialize(System::block_number()); + Migrations::on_initialize(System::block_number()); + // Executive calls this: + ::step(); + + Migrations::on_finalize(System::block_number()); + System::on_finalize(System::block_number()); + } +} + +/// Returns the historic migrations, sorted by their identifier. +pub fn historic() -> Vec { + let mut historic = Historic::::iter_keys().collect::>(); + historic.sort(); + historic +} + +// Traits to make using events less insufferable: +pub trait IntoRecord { + fn into_record(self) -> EventRecord<::RuntimeEvent, H256>; +} + +impl IntoRecord for Event { + fn into_record(self) -> EventRecord<::RuntimeEvent, H256> { + let re: ::RuntimeEvent = self.into(); + EventRecord { phase: frame_system::Phase::Initialization, event: re, topics: vec![] } + } +} + +pub trait IntoRecords { + fn into_records(self) -> Vec::RuntimeEvent, H256>>; +} + +impl IntoRecords for Vec { + fn into_records(self) -> Vec::RuntimeEvent, H256>> { + self.into_iter().map(|e| e.into_record()).collect() + } +} + +pub fn assert_events(events: Vec) { + pretty_assertions::assert_eq!(events.into_records(), System::events()); + System::reset_events(); +} diff --git a/substrate/frame/migrations/src/mock_helpers.rs b/substrate/frame/migrations/src/mock_helpers.rs new file mode 100644 index 0000000000000000000000000000000000000000..c5e23efb4e314e6f5562c50521a533c82f94669e --- /dev/null +++ b/substrate/frame/migrations/src/mock_helpers.rs @@ -0,0 +1,142 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test helpers for internal and external usage. + +#![allow(missing_docs)] + +use codec::{Decode, Encode}; +use frame_support::{ + migrations::*, + weights::{Weight, WeightMeter}, +}; +use sp_core::ConstU32; +use sp_runtime::BoundedVec; +use sp_std::{vec, vec::Vec}; + +/// Opaque identifier of a migration. +pub type MockedIdentifier = BoundedVec>; + +/// How a mocked migration should behave. +#[derive(Debug, Clone, Copy, Encode, Decode)] +pub enum MockedMigrationKind { + /// Succeed after its number of steps elapsed. + SucceedAfter, + /// Fail after its number of steps elapsed. + FailAfter, + /// Never terminate. + TimeoutAfter, + /// Cause an [`SteppedMigrationError::InsufficientWeight`] error after its number of steps + /// elapsed. + HighWeightAfter(Weight), +} +use MockedMigrationKind::*; // C style + +/// Creates a migration identifier with a specific `kind` and `steps`. +pub fn mocked_id(kind: MockedMigrationKind, steps: u32) -> MockedIdentifier { + (b"MockedMigration", kind, steps).encode().try_into().unwrap() +} + +frame_support::parameter_types! { + /// The configs for the migrations to run. + storage MIGRATIONS: Vec<(MockedMigrationKind, u32)> = vec![]; +} + +/// Allows to set the migrations to run at runtime instead of compile-time. +/// +/// It achieves this by using the storage to store the migrations to run. +pub struct MockedMigrations; +impl SteppedMigrations for MockedMigrations { + fn len() -> u32 { + MIGRATIONS::get().len() as u32 + } + + fn nth_id(n: u32) -> Option> { + let k = MIGRATIONS::get().get(n as usize).copied(); + k.map(|(kind, steps)| mocked_id(kind, steps).into_inner()) + } + + fn nth_step( + n: u32, + cursor: Option>, + _meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>> { + let (kind, steps) = MIGRATIONS::get()[n as usize]; + + let mut count: u32 = + cursor.as_ref().and_then(|c| Decode::decode(&mut &c[..]).ok()).unwrap_or(0); + log::debug!("MockedMigration: Step {}", count); + if count != steps || matches!(kind, TimeoutAfter) { + count += 1; + return Some(Ok(Some(count.encode()))) + } + + Some(match kind { + SucceedAfter => { + log::debug!("MockedMigration: Succeeded after {} steps", count); + Ok(None) + }, + HighWeightAfter(required) => { + log::debug!("MockedMigration: Not enough weight after {} steps", count); + Err(SteppedMigrationError::InsufficientWeight { required }) + }, + FailAfter => { + log::debug!("MockedMigration: Failed after {} steps", count); + Err(SteppedMigrationError::Failed) + }, + TimeoutAfter => unreachable!(), + }) + } + + fn nth_transactional_step( + n: u32, + cursor: Option>, + meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>> { + // This is a hack but should be fine. We dont need it in testing. + Self::nth_step(n, cursor, meter) + } + + fn nth_max_steps(n: u32) -> Option> { + MIGRATIONS::get().get(n as usize).map(|(_, s)| Some(*s)) + } + + fn cursor_max_encoded_len() -> usize { + 65_536 + } + + fn identifier_max_encoded_len() -> usize { + 256 + } +} + +impl MockedMigrations { + /// Set the migrations to run. 
+ pub fn set(migrations: Vec<(MockedMigrationKind, u32)>) { + MIGRATIONS::set(&migrations); + } +} + +impl crate::MockedMigrations for MockedMigrations { + fn set_fail_after(steps: u32) { + MIGRATIONS::set(&vec![(FailAfter, steps)]); + } + + fn set_success_after(steps: u32) { + MIGRATIONS::set(&vec![(SucceedAfter, steps)]); + } +} diff --git a/substrate/frame/migrations/src/tests.rs b/substrate/frame/migrations/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..9c9043d37a62e5f1df3d12da979b68251621c9d0 --- /dev/null +++ b/substrate/frame/migrations/src/tests.rs @@ -0,0 +1,335 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg(test)] + +use crate::{ + mock::{Test as T, *}, + mock_helpers::{MockedMigrationKind::*, *}, + Cursor, Event, FailedMigrationHandling, MigrationCursor, +}; +use frame_support::{pallet_prelude::Weight, traits::OnRuntimeUpgrade}; + +#[docify::export] +#[test] +fn simple_works() { + use Event::*; + test_closure(|| { + // Add three migrations, each taking one block longer than the previous. + MockedMigrations::set(vec![(SucceedAfter, 0), (SucceedAfter, 1), (SucceedAfter, 2)]); + + System::set_block_number(1); + Migrations::on_runtime_upgrade(); + run_to_block(10); + + // Check that the executed migrations are recorded in `Historical`. + assert_eq!( + historic(), + vec![ + mocked_id(SucceedAfter, 0), + mocked_id(SucceedAfter, 1), + mocked_id(SucceedAfter, 2), + ] + ); + + // Check that we got all events. + assert_events(vec![ + UpgradeStarted { migrations: 3 }, + MigrationCompleted { index: 0, took: 1 }, + MigrationAdvanced { index: 1, took: 0 }, + MigrationCompleted { index: 1, took: 1 }, + MigrationAdvanced { index: 2, took: 0 }, + MigrationAdvanced { index: 2, took: 1 }, + MigrationCompleted { index: 2, took: 2 }, + UpgradeCompleted, + ]); + }); +} + +#[test] +fn failing_migration_sets_cursor_to_stuck() { + test_closure(|| { + FailedUpgradeResponse::set(FailedMigrationHandling::KeepStuck); + MockedMigrations::set(vec![(FailAfter, 2)]); + + System::set_block_number(1); + Migrations::on_runtime_upgrade(); + run_to_block(10); + + // Failed migrations are not recorded in `Historical`. + assert!(historic().is_empty()); + // Check that we got all events. + assert_events(vec![ + Event::UpgradeStarted { migrations: 1 }, + Event::MigrationAdvanced { index: 0, took: 1 }, + Event::MigrationAdvanced { index: 0, took: 2 }, + Event::MigrationFailed { index: 0, took: 3 }, + Event::UpgradeFailed, + ]); + + // Check that the handler was called correctly. 
+ assert_eq!(UpgradesStarted::take(), 1); + assert_eq!(UpgradesCompleted::take(), 0); + assert_eq!(UpgradesFailed::take(), vec![Some(0)]); + + assert_eq!(Cursor::::get(), Some(MigrationCursor::Stuck), "Must stuck the chain"); + }); +} + +#[test] +fn failing_migration_force_unstuck_works() { + test_closure(|| { + FailedUpgradeResponse::set(FailedMigrationHandling::ForceUnstuck); + MockedMigrations::set(vec![(FailAfter, 2)]); + + System::set_block_number(1); + Migrations::on_runtime_upgrade(); + run_to_block(10); + + // Failed migrations are not recorded in `Historical`. + assert!(historic().is_empty()); + // Check that we got all events. + assert_events(vec![ + Event::UpgradeStarted { migrations: 1 }, + Event::MigrationAdvanced { index: 0, took: 1 }, + Event::MigrationAdvanced { index: 0, took: 2 }, + Event::MigrationFailed { index: 0, took: 3 }, + Event::UpgradeFailed, + ]); + + // Check that the handler was called correctly. + assert_eq!(UpgradesStarted::take(), 1); + assert_eq!(UpgradesCompleted::take(), 0); + assert_eq!(UpgradesFailed::take(), vec![Some(0)]); + + assert!(Cursor::::get().is_none(), "Must unstuck the chain"); + }); +} + +/// A migration that reports not getting enough weight errors if it is the first one to run in that +/// block. +#[test] +fn high_weight_migration_singular_fails() { + test_closure(|| { + MockedMigrations::set(vec![(HighWeightAfter(Weight::zero()), 2)]); + + System::set_block_number(1); + Migrations::on_runtime_upgrade(); + run_to_block(10); + + // Failed migrations are not recorded in `Historical`. + assert!(historic().is_empty()); + // Check that we got all events. + assert_events(vec![ + Event::UpgradeStarted { migrations: 1 }, + Event::MigrationAdvanced { index: 0, took: 1 }, + Event::MigrationAdvanced { index: 0, took: 2 }, + Event::MigrationFailed { index: 0, took: 3 }, + Event::UpgradeFailed, + ]); + + // Check that the handler was called correctly. + assert_eq!(upgrades_started_completed_failed(), (1, 0, 1)); + assert_eq!(Cursor::::get(), Some(MigrationCursor::Stuck)); + }); +} + +/// A migration that reports of not getting enough weight is retried once, if it is not the first +/// one to run in a block. +#[test] +fn high_weight_migration_retries_once() { + test_closure(|| { + MockedMigrations::set(vec![(SucceedAfter, 0), (HighWeightAfter(Weight::zero()), 0)]); + + System::set_block_number(1); + Migrations::on_runtime_upgrade(); + run_to_block(10); + + assert_eq!(historic(), vec![mocked_id(SucceedAfter, 0)]); + // Check that we got all events. + assert_events::>(vec![ + Event::UpgradeStarted { migrations: 2 }, + Event::MigrationCompleted { index: 0, took: 1 }, + // `took=1` means that it was retried once. + Event::MigrationFailed { index: 1, took: 1 }, + Event::UpgradeFailed, + ]); + + // Check that the handler was called correctly. + assert_eq!(upgrades_started_completed_failed(), (1, 0, 1)); + assert_eq!(Cursor::::get(), Some(MigrationCursor::Stuck)); + }); +} + +/// If a migration uses more weight than the limit, then it will not retry but fail even when it is +/// not the first one in the block. +// Note: Same as `high_weight_migration_retries_once` but with different required weight for the +// migration. 
+#[test] +fn high_weight_migration_permanently_overweight_fails() { + test_closure(|| { + MockedMigrations::set(vec![(SucceedAfter, 0), (HighWeightAfter(Weight::MAX), 0)]); + + System::set_block_number(1); + Migrations::on_runtime_upgrade(); + run_to_block(10); + + assert_eq!(historic(), vec![mocked_id(SucceedAfter, 0)]); + // Check that we got all events. + assert_events::>(vec![ + Event::UpgradeStarted { migrations: 2 }, + Event::MigrationCompleted { index: 0, took: 1 }, + // `blocks=0` means that it was not retried. + Event::MigrationFailed { index: 1, took: 0 }, + Event::UpgradeFailed, + ]); + + // Check that the handler was called correctly. + assert_eq!(upgrades_started_completed_failed(), (1, 0, 1)); + assert_eq!(Cursor::::get(), Some(MigrationCursor::Stuck)); + }); +} + +#[test] +fn historic_skipping_works() { + test_closure(|| { + MockedMigrations::set(vec![ + (SucceedAfter, 0), + (SucceedAfter, 0), // duplicate + (SucceedAfter, 1), + (SucceedAfter, 2), + (SucceedAfter, 1), // duplicate + ]); + + System::set_block_number(1); + Migrations::on_runtime_upgrade(); + run_to_block(10); + + // Just three historical ones, since two were added twice. + assert_eq!( + historic(), + vec![ + mocked_id(SucceedAfter, 0), + mocked_id(SucceedAfter, 1), + mocked_id(SucceedAfter, 2), + ] + ); + // Events received. + assert_events(vec![ + Event::UpgradeStarted { migrations: 5 }, + Event::MigrationCompleted { index: 0, took: 1 }, + Event::MigrationSkipped { index: 1 }, + Event::MigrationAdvanced { index: 2, took: 0 }, + Event::MigrationCompleted { index: 2, took: 1 }, + Event::MigrationAdvanced { index: 3, took: 0 }, + Event::MigrationAdvanced { index: 3, took: 1 }, + Event::MigrationCompleted { index: 3, took: 2 }, + Event::MigrationSkipped { index: 4 }, + Event::UpgradeCompleted, + ]); + assert_eq!(upgrades_started_completed_failed(), (1, 1, 0)); + + // Now go for another upgrade; just to make sure that it wont execute again. + System::reset_events(); + Migrations::on_runtime_upgrade(); + run_to_block(20); + + // Same historical ones as before. + assert_eq!( + historic(), + vec![ + mocked_id(SucceedAfter, 0), + mocked_id(SucceedAfter, 1), + mocked_id(SucceedAfter, 2), + ] + ); + + // Everything got skipped. + assert_events(vec![ + Event::UpgradeStarted { migrations: 5 }, + Event::MigrationSkipped { index: 0 }, + Event::MigrationSkipped { index: 1 }, + Event::MigrationSkipped { index: 2 }, + Event::MigrationSkipped { index: 3 }, + Event::MigrationSkipped { index: 4 }, + Event::UpgradeCompleted, + ]); + assert_eq!(upgrades_started_completed_failed(), (1, 1, 0)); + }); +} + +/// When another upgrade happens while a migration is still running, it should set the cursor to +/// stuck. +#[test] +fn upgrade_fails_when_migration_active() { + test_closure(|| { + MockedMigrations::set(vec![(SucceedAfter, 10)]); + + System::set_block_number(1); + Migrations::on_runtime_upgrade(); + run_to_block(3); + + // Events received. + assert_events(vec![ + Event::UpgradeStarted { migrations: 1 }, + Event::MigrationAdvanced { index: 0, took: 1 }, + Event::MigrationAdvanced { index: 0, took: 2 }, + ]); + assert_eq!(upgrades_started_completed_failed(), (1, 0, 0)); + + // Upgrade again. 
+ Migrations::on_runtime_upgrade(); + // -- Defensive path -- + assert_eq!(Cursor::::get(), Some(MigrationCursor::Stuck)); + assert_events(vec![Event::UpgradeFailed]); + assert_eq!(upgrades_started_completed_failed(), (0, 0, 1)); + }); +} + +#[test] +fn migration_timeout_errors() { + test_closure(|| { + MockedMigrations::set(vec![(TimeoutAfter, 3)]); + + System::set_block_number(1); + Migrations::on_runtime_upgrade(); + run_to_block(5); + + // Times out after taking more than 3 steps. + assert_events(vec![ + Event::UpgradeStarted { migrations: 1 }, + Event::MigrationAdvanced { index: 0, took: 1 }, + Event::MigrationAdvanced { index: 0, took: 2 }, + Event::MigrationAdvanced { index: 0, took: 3 }, + Event::MigrationAdvanced { index: 0, took: 4 }, + Event::MigrationFailed { index: 0, took: 4 }, + Event::UpgradeFailed, + ]); + assert_eq!(upgrades_started_completed_failed(), (1, 0, 1)); + + // Failed migrations are not black-listed. + assert!(historic().is_empty()); + assert_eq!(Cursor::::get(), Some(MigrationCursor::Stuck)); + + Migrations::on_runtime_upgrade(); + run_to_block(6); + + assert_events(vec![Event::UpgradeFailed]); + assert_eq!(Cursor::::get(), Some(MigrationCursor::Stuck)); + assert_eq!(upgrades_started_completed_failed(), (0, 0, 1)); + }); +} diff --git a/substrate/frame/migrations/src/weights.rs b/substrate/frame/migrations/src/weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..9eca48ac3fd9ca8663a0b54a0258591e9b4023aa --- /dev/null +++ b/substrate/frame/migrations/src/weights.rs @@ -0,0 +1,361 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_migrations` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// ./target/production/substrate-node +// benchmark +// pallet +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_migrations +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./substrate/frame/migrations/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_migrations`. 
+pub trait WeightInfo { + fn onboard_new_mbms() -> Weight; + fn progress_mbms_none() -> Weight; + fn exec_migration_completed() -> Weight; + fn exec_migration_skipped_historic() -> Weight; + fn exec_migration_advance() -> Weight; + fn exec_migration_complete() -> Weight; + fn exec_migration_fail() -> Weight; + fn on_init_loop() -> Weight; + fn force_set_cursor() -> Weight; + fn force_set_active_cursor() -> Weight; + fn force_onboard_mbms() -> Weight; + fn clear_historic(n: u32, ) -> Weight; +} + +/// Weights for `pallet_migrations` using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + fn onboard_new_mbms() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `67035` + // Minimum execution time: 7_932_000 picoseconds. + Weight::from_parts(8_428_000, 67035) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn progress_mbms_none() -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `67035` + // Minimum execution time: 2_229_000 picoseconds. + Weight::from_parts(2_329_000, 67035) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn exec_migration_completed() -> Weight { + // Proof Size summary in bytes: + // Measured: `134` + // Estimated: `3599` + // Minimum execution time: 6_051_000 picoseconds. + Weight::from_parts(6_483_000, 3599) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_skipped_historic() -> Weight { + // Proof Size summary in bytes: + // Measured: `330` + // Estimated: `3795` + // Minimum execution time: 10_066_000 picoseconds. + Weight::from_parts(10_713_000, 3795) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_advance() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 10_026_000 picoseconds. 
+ Weight::from_parts(10_379_000, 3741) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:1) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_complete() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 11_680_000 picoseconds. + Weight::from_parts(12_184_000, 3741) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn exec_migration_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 12_334_000 picoseconds. + Weight::from_parts(12_899_000, 3741) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + fn on_init_loop() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 187_000 picoseconds. + Weight::from_parts(209_000, 0) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn force_set_cursor() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_688_000 picoseconds. + Weight::from_parts(2_874_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn force_set_active_cursor() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_108_000 picoseconds. + Weight::from_parts(3_263_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + fn force_onboard_mbms() -> Weight { + // Proof Size summary in bytes: + // Measured: `251` + // Estimated: `67035` + // Minimum execution time: 5_993_000 picoseconds. + Weight::from_parts(6_359_000, 67035) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + /// Storage: `MultiBlockMigrations::Historic` (r:256 w:256) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 256]`. 
+ fn clear_historic(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1122 + n * (271 ±0)` + // Estimated: `3834 + n * (2740 ±0)` + // Minimum execution time: 16_003_000 picoseconds. + Weight::from_parts(14_453_014, 3834) + // Standard Error: 3_305 + .saturating_add(Weight::from_parts(1_325_026, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2740).saturating_mul(n.into())) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + fn onboard_new_mbms() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `67035` + // Minimum execution time: 7_932_000 picoseconds. + Weight::from_parts(8_428_000, 67035) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn progress_mbms_none() -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `67035` + // Minimum execution time: 2_229_000 picoseconds. + Weight::from_parts(2_329_000, 67035) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn exec_migration_completed() -> Weight { + // Proof Size summary in bytes: + // Measured: `134` + // Estimated: `3599` + // Minimum execution time: 6_051_000 picoseconds. + Weight::from_parts(6_483_000, 3599) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_skipped_historic() -> Weight { + // Proof Size summary in bytes: + // Measured: `330` + // Estimated: `3795` + // Minimum execution time: 10_066_000 picoseconds. 
+ Weight::from_parts(10_713_000, 3795) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_advance() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 10_026_000 picoseconds. + Weight::from_parts(10_379_000, 3741) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:1) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_complete() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 11_680_000 picoseconds. + Weight::from_parts(12_184_000, 3741) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn exec_migration_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 12_334_000 picoseconds. + Weight::from_parts(12_899_000, 3741) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + fn on_init_loop() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 187_000 picoseconds. + Weight::from_parts(209_000, 0) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn force_set_cursor() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_688_000 picoseconds. + Weight::from_parts(2_874_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn force_set_active_cursor() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_108_000 picoseconds. 
+ Weight::from_parts(3_263_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + fn force_onboard_mbms() -> Weight { + // Proof Size summary in bytes: + // Measured: `251` + // Estimated: `67035` + // Minimum execution time: 5_993_000 picoseconds. + Weight::from_parts(6_359_000, 67035) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + } + /// Storage: `MultiBlockMigrations::Historic` (r:256 w:256) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 256]`. + fn clear_historic(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1122 + n * (271 ±0)` + // Estimated: `3834 + n * (2740 ±0)` + // Minimum execution time: 16_003_000 picoseconds. + Weight::from_parts(14_453_014, 3834) + // Standard Error: 3_305 + .saturating_add(Weight::from_parts(1_325_026, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2740).saturating_mul(n.into())) + } +} diff --git a/substrate/frame/multisig/src/lib.rs b/substrate/frame/multisig/src/lib.rs index e4426c64b4125fe267a1e30a42a7196274b05e4e..a83b78e316f500ddc9c615420c2ac627b90ee7e9 100644 --- a/substrate/frame/multisig/src/lib.rs +++ b/substrate/frame/multisig/src/lib.rs @@ -168,7 +168,7 @@ pub mod pallet { type WeightInfo: WeightInfo; } - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/substrate/frame/multisig/src/migrations.rs b/substrate/frame/multisig/src/migrations.rs index d03e42a66a5b5af07248ee7b11061d5f26b66c98..e6402600d0d368783e30f43e0afc3adad4d0a1ba 100644 --- a/substrate/frame/multisig/src/migrations.rs +++ b/substrate/frame/multisig/src/migrations.rs @@ -51,7 +51,7 @@ pub mod v1 { fn on_runtime_upgrade() -> Weight { use sp_runtime::Saturating; - let current = Pallet::::current_storage_version(); + let current = Pallet::::in_code_storage_version(); let onchain = Pallet::::on_chain_storage_version(); if onchain > 0 { diff --git a/substrate/frame/multisig/src/weights.rs b/substrate/frame/multisig/src/weights.rs index 7b87d258d383d43aaf8ef8aaddc0546c60b5813e..4cbe7bb03b7d5fecc8b9b03edab6613c1d42f34e 100644 --- a/substrate/frame/multisig/src/weights.rs +++ b/substrate/frame/multisig/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_multisig +//! Autogenerated weights for `pallet_multisig` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/multisig/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/multisig/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_multisig. +/// Weight functions needed for `pallet_multisig`. pub trait WeightInfo { fn as_multi_threshold_1(z: u32, ) -> Weight; fn as_multi_create(s: u32, z: u32, ) -> Weight; @@ -61,220 +60,238 @@ pub trait WeightInfo { fn cancel_as_multi(s: u32, ) -> Weight; } -/// Weights for pallet_multisig using the Substrate node and recommended hardware. +/// Weights for `pallet_multisig` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `z` is `[0, 10000]`. fn as_multi_threshold_1(z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 13_452_000 picoseconds. - Weight::from_parts(14_425_869, 0) + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 18_893_000 picoseconds. + Weight::from_parts(20_278_307, 3997) // Standard Error: 4 - .saturating_add(Weight::from_parts(493, 0).saturating_mul(z.into())) + .saturating_add(Weight::from_parts(488, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_create(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `301 + s * (2 ±0)` // Estimated: `6811` - // Minimum execution time: 46_012_000 picoseconds. - Weight::from_parts(34_797_344, 6811) - // Standard Error: 833 - .saturating_add(Weight::from_parts(127_671, 0).saturating_mul(s.into())) - // Standard Error: 8 - .saturating_add(Weight::from_parts(1_498, 0).saturating_mul(z.into())) + // Minimum execution time: 39_478_000 picoseconds. 
+ Weight::from_parts(29_195_487, 6811) + // Standard Error: 739 + .saturating_add(Weight::from_parts(118_766, 0).saturating_mul(s.into())) + // Standard Error: 7 + .saturating_add(Weight::from_parts(1_511, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[3, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_approve(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `320` // Estimated: `6811` - // Minimum execution time: 29_834_000 picoseconds. - Weight::from_parts(20_189_154, 6811) - // Standard Error: 637 - .saturating_add(Weight::from_parts(110_080, 0).saturating_mul(s.into())) - // Standard Error: 6 - .saturating_add(Weight::from_parts(1_483, 0).saturating_mul(z.into())) + // Minimum execution time: 26_401_000 picoseconds. + Weight::from_parts(17_277_296, 6811) + // Standard Error: 492 + .saturating_add(Weight::from_parts(101_763, 0).saturating_mul(s.into())) + // Standard Error: 4 + .saturating_add(Weight::from_parts(1_486, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_complete(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `426 + s * (33 ±0)` + // Measured: `571 + s * (33 ±0)` // Estimated: `6811` - // Minimum execution time: 51_464_000 picoseconds. - Weight::from_parts(39_246_644, 6811) - // Standard Error: 1_251 - .saturating_add(Weight::from_parts(143_313, 0).saturating_mul(s.into())) + // Minimum execution time: 52_430_000 picoseconds. 
+ Weight::from_parts(40_585_478, 6811) + // Standard Error: 1_240 + .saturating_add(Weight::from_parts(161_405, 0).saturating_mul(s.into())) // Standard Error: 12 - .saturating_add(Weight::from_parts(1_523, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(Weight::from_parts(1_547, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. fn approve_as_multi_create(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `301 + s * (2 ±0)` // Estimated: `6811` - // Minimum execution time: 33_275_000 picoseconds. - Weight::from_parts(34_073_221, 6811) - // Standard Error: 1_163 - .saturating_add(Weight::from_parts(124_815, 0).saturating_mul(s.into())) + // Minimum execution time: 27_797_000 picoseconds. + Weight::from_parts(29_064_584, 6811) + // Standard Error: 817 + .saturating_add(Weight::from_parts(108_179, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. fn approve_as_multi_approve(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `320` // Estimated: `6811` - // Minimum execution time: 18_411_000 picoseconds. - Weight::from_parts(19_431_787, 6811) - // Standard Error: 694 - .saturating_add(Weight::from_parts(107_220, 0).saturating_mul(s.into())) + // Minimum execution time: 15_236_000 picoseconds. + Weight::from_parts(16_360_247, 6811) + // Standard Error: 584 + .saturating_add(Weight::from_parts(94_917, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. fn cancel_as_multi(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `492 + s * (1 ±0)` // Estimated: `6811` - // Minimum execution time: 33_985_000 picoseconds. - Weight::from_parts(35_547_970, 6811) - // Standard Error: 1_135 - .saturating_add(Weight::from_parts(116_537, 0).saturating_mul(s.into())) + // Minimum execution time: 28_730_000 picoseconds. + Weight::from_parts(30_056_661, 6811) + // Standard Error: 792 + .saturating_add(Weight::from_parts(108_212, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `z` is `[0, 10000]`. fn as_multi_threshold_1(z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 13_452_000 picoseconds. - Weight::from_parts(14_425_869, 0) + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 18_893_000 picoseconds. + Weight::from_parts(20_278_307, 3997) // Standard Error: 4 - .saturating_add(Weight::from_parts(493, 0).saturating_mul(z.into())) + .saturating_add(Weight::from_parts(488, 0).saturating_mul(z.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_create(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `301 + s * (2 ±0)` // Estimated: `6811` - // Minimum execution time: 46_012_000 picoseconds. - Weight::from_parts(34_797_344, 6811) - // Standard Error: 833 - .saturating_add(Weight::from_parts(127_671, 0).saturating_mul(s.into())) - // Standard Error: 8 - .saturating_add(Weight::from_parts(1_498, 0).saturating_mul(z.into())) + // Minimum execution time: 39_478_000 picoseconds. + Weight::from_parts(29_195_487, 6811) + // Standard Error: 739 + .saturating_add(Weight::from_parts(118_766, 0).saturating_mul(s.into())) + // Standard Error: 7 + .saturating_add(Weight::from_parts(1_511, 0).saturating_mul(z.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[3, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_approve(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `320` // Estimated: `6811` - // Minimum execution time: 29_834_000 picoseconds. - Weight::from_parts(20_189_154, 6811) - // Standard Error: 637 - .saturating_add(Weight::from_parts(110_080, 0).saturating_mul(s.into())) - // Standard Error: 6 - .saturating_add(Weight::from_parts(1_483, 0).saturating_mul(z.into())) + // Minimum execution time: 26_401_000 picoseconds. 
+ Weight::from_parts(17_277_296, 6811) + // Standard Error: 492 + .saturating_add(Weight::from_parts(101_763, 0).saturating_mul(s.into())) + // Standard Error: 4 + .saturating_add(Weight::from_parts(1_486, 0).saturating_mul(z.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_complete(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `426 + s * (33 ±0)` + // Measured: `571 + s * (33 ±0)` // Estimated: `6811` - // Minimum execution time: 51_464_000 picoseconds. - Weight::from_parts(39_246_644, 6811) - // Standard Error: 1_251 - .saturating_add(Weight::from_parts(143_313, 0).saturating_mul(s.into())) + // Minimum execution time: 52_430_000 picoseconds. + Weight::from_parts(40_585_478, 6811) + // Standard Error: 1_240 + .saturating_add(Weight::from_parts(161_405, 0).saturating_mul(s.into())) // Standard Error: 12 - .saturating_add(Weight::from_parts(1_523, 0).saturating_mul(z.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(Weight::from_parts(1_547, 0).saturating_mul(z.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. fn approve_as_multi_create(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `301 + s * (2 ±0)` // Estimated: `6811` - // Minimum execution time: 33_275_000 picoseconds. - Weight::from_parts(34_073_221, 6811) - // Standard Error: 1_163 - .saturating_add(Weight::from_parts(124_815, 0).saturating_mul(s.into())) + // Minimum execution time: 27_797_000 picoseconds. 
+ Weight::from_parts(29_064_584, 6811) + // Standard Error: 817 + .saturating_add(Weight::from_parts(108_179, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. fn approve_as_multi_approve(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `320` // Estimated: `6811` - // Minimum execution time: 18_411_000 picoseconds. - Weight::from_parts(19_431_787, 6811) - // Standard Error: 694 - .saturating_add(Weight::from_parts(107_220, 0).saturating_mul(s.into())) + // Minimum execution time: 15_236_000 picoseconds. + Weight::from_parts(16_360_247, 6811) + // Standard Error: 584 + .saturating_add(Weight::from_parts(94_917, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. fn cancel_as_multi(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `492 + s * (1 ±0)` // Estimated: `6811` - // Minimum execution time: 33_985_000 picoseconds. - Weight::from_parts(35_547_970, 6811) - // Standard Error: 1_135 - .saturating_add(Weight::from_parts(116_537, 0).saturating_mul(s.into())) + // Minimum execution time: 28_730_000 picoseconds. + Weight::from_parts(30_056_661, 6811) + // Standard Error: 792 + .saturating_add(Weight::from_parts(108_212, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/nft-fractionalization/src/weights.rs b/substrate/frame/nft-fractionalization/src/weights.rs index ebb4aa0fbcfba2df71b6b7ecb9368540ef8b9ffa..07872ebaea9083d9e9cf5326b5b93c1da7bcf9e3 100644 --- a/substrate/frame/nft-fractionalization/src/weights.rs +++ b/substrate/frame/nft-fractionalization/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_nft_fractionalization +//! Autogenerated weights for `pallet_nft_fractionalization` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/nft-fractionalization/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/nft-fractionalization/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,136 +49,136 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_nft_fractionalization. +/// Weight functions needed for `pallet_nft_fractionalization`. pub trait WeightInfo { fn fractionalize() -> Weight; fn unify() -> Weight; } -/// Weights for pallet_nft_fractionalization using the Substrate node and recommended hardware. +/// Weights for `pallet_nft_fractionalization` using the Substrate node and recommended hardware. pub struct SubstrateWeight<T>(PhantomData<T>); impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { - /// Storage: Nfts Item (r:1 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1 w:1) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:1) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) - /// Storage: NftFractionalization NftToAsset (r:0 w:1) - /// Proof: NftFractionalization NftToAsset (max_values: None, max_size: Some(92), added: 2567, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + ///
Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + /// Storage: `NftFractionalization::NftToAsset` (r:0 w:1) + /// Proof: `NftFractionalization::NftToAsset` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) fn fractionalize() -> Weight { // Proof Size summary in bytes: // Measured: `609` // Estimated: `4326` - // Minimum execution time: 187_416_000 picoseconds. - Weight::from_parts(191_131_000, 4326) + // Minimum execution time: 164_764_000 picoseconds. + Weight::from_parts(168_243_000, 4326) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(8_u64)) } - /// Storage: NftFractionalization NftToAsset (r:1 w:1) - /// Proof: NftFractionalization NftToAsset (max_values: None, max_size: Some(92), added: 2567, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1 w:1) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:1) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Nfts ItemPriceOf (r:0 w:1) - /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Nfts PendingSwapOf (r:0 w:1) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) + /// Storage: `NftFractionalization::NftToAsset` (r:1 w:1) + /// Proof: `NftFractionalization::NftToAsset` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, 
`max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) fn unify() -> Weight { // Proof Size summary in bytes: // Measured: `1422` // Estimated: `4326` - // Minimum execution time: 134_159_000 picoseconds. - Weight::from_parts(136_621_000, 4326) + // Minimum execution time: 120_036_000 picoseconds. + Weight::from_parts(123_550_000, 4326) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(10_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Nfts Item (r:1 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1 w:1) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:1) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) - /// Storage: NftFractionalization NftToAsset (r:0 w:1) - /// Proof: NftFractionalization NftToAsset (max_values: None, max_size: Some(92), added: 2567, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: 
`Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + /// Storage: `NftFractionalization::NftToAsset` (r:0 w:1) + /// Proof: `NftFractionalization::NftToAsset` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) fn fractionalize() -> Weight { // Proof Size summary in bytes: // Measured: `609` // Estimated: `4326` - // Minimum execution time: 187_416_000 picoseconds. - Weight::from_parts(191_131_000, 4326) + // Minimum execution time: 164_764_000 picoseconds. + Weight::from_parts(168_243_000, 4326) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(8_u64)) } - /// Storage: NftFractionalization NftToAsset (r:1 w:1) - /// Proof: NftFractionalization NftToAsset (max_values: None, max_size: Some(92), added: 2567, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1 w:1) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:1) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Nfts ItemPriceOf (r:0 w:1) - /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Nfts PendingSwapOf (r:0 w:1) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) + /// Storage: `NftFractionalization::NftToAsset` (r:1 w:1) + /// Proof: `NftFractionalization::NftToAsset` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 
2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) fn unify() -> Weight { // Proof Size summary in bytes: // Measured: `1422` // Estimated: `4326` - // Minimum execution time: 134_159_000 picoseconds. - Weight::from_parts(136_621_000, 4326) + // Minimum execution time: 120_036_000 picoseconds. + Weight::from_parts(123_550_000, 4326) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(10_u64)) } diff --git a/substrate/frame/nfts/src/lib.rs b/substrate/frame/nfts/src/lib.rs index a7d505e2e397dbca5f547ed9ed1a7c3cc4235aa5..7cf7cdc61a7d58c1cee641cee1fde92e92836876 100644 --- a/substrate/frame/nfts/src/lib.rs +++ b/substrate/frame/nfts/src/lib.rs @@ -76,7 +76,7 @@ pub mod pallet { use frame_support::{pallet_prelude::*, traits::ExistenceRequirement}; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. 
const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/substrate/frame/nfts/src/migration.rs b/substrate/frame/nfts/src/migration.rs index d4cdf7e820d15b7df1d8f36807baf34b81c34bd3..8f82e092262fb2d63cf1dda563afb0dd11c6f6ac 100644 --- a/substrate/frame/nfts/src/migration.rs +++ b/substrate/frame/nfts/src/migration.rs @@ -54,17 +54,17 @@ pub mod v1 { pub struct MigrateToV1(core::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { - let current_version = Pallet::::current_storage_version(); - let onchain_version = Pallet::::on_chain_storage_version(); + let in_code_version = Pallet::::in_code_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); log::info!( target: LOG_TARGET, - "Running migration with current storage version {:?} / onchain {:?}", - current_version, - onchain_version + "Running migration with in-code storage version {:?} / onchain {:?}", + in_code_version, + on_chain_version ); - if onchain_version == 0 && current_version == 1 { + if on_chain_version == 0 && in_code_version == 1 { let mut translated = 0u64; let mut configs_iterated = 0u64; Collection::::translate::< @@ -77,13 +77,13 @@ pub mod v1 { Some(old_value.migrate_to_v1(item_configs)) }); - current_version.put::>(); + in_code_version.put::>(); log::info!( target: LOG_TARGET, "Upgraded {} records, storage to version {:?}", translated, - current_version + in_code_version ); T::DbWeight::get().reads_writes(translated + configs_iterated + 1, translated + 1) } else { diff --git a/substrate/frame/nfts/src/weights.rs b/substrate/frame/nfts/src/weights.rs index 6b8c577bb12e5d19bf5751d2a5019d60aea743b0..4fcc1e601f4116b75c4f86d7a19e41043f891876 100644 --- a/substrate/frame/nfts/src/weights.rs +++ b/substrate/frame/nfts/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_nfts +//! Autogenerated weights for `pallet_nfts` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -37,9 +37,9 @@ // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/nfts/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/nfts/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -49,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_nfts. 
+/// Weight functions needed for `pallet_nfts`. pub trait WeightInfo { fn create() -> Weight; fn force_create() -> Weight; @@ -92,564 +92,568 @@ pub trait WeightInfo { fn set_attributes_pre_signed(n: u32, ) -> Weight; } -/// Weights for pallet_nfts using the Substrate node and recommended hardware. +/// Weights for `pallet_nfts` using the Substrate node and recommended hardware. pub struct SubstrateWeight<T>(PhantomData<T>); impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { - /// Storage: Nfts NextCollectionId (r:1 w:1) - /// Proof: Nfts NextCollectionId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:0 w:1) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:0 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts CollectionAccount (r:0 w:1) - /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Nfts::NextCollectionId` (r:1 w:1) + /// Proof: `Nfts::NextCollectionId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:0 w:1) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:1) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: // Measured: `216` // Estimated: `3549` - // Minimum execution time: 40_489_000 picoseconds. - Weight::from_parts(41_320_000, 3549) + // Minimum execution time: 34_035_000 picoseconds.
+ Weight::from_parts(35_228_000, 3549) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Nfts NextCollectionId (r:1 w:1) - /// Proof: Nfts NextCollectionId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:0 w:1) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:0 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts CollectionAccount (r:0 w:1) - /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Nfts::NextCollectionId` (r:1 w:1) + /// Proof: `Nfts::NextCollectionId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:0 w:1) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:1) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn force_create() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3549` - // Minimum execution time: 23_257_000 picoseconds. - Weight::from_parts(23_770_000, 3549) + // Minimum execution time: 19_430_000 picoseconds. 
+ Weight::from_parts(20_054_000, 3549) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts ItemMetadataOf (r:1 w:0) - /// Proof: Nfts ItemMetadataOf (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:1 w:1) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1001 w:1000) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1000 w:1000) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts CollectionMetadataOf (r:0 w:1) - /// Proof: Nfts CollectionMetadataOf (max_values: None, max_size: Some(87), added: 2562, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:0 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts CollectionAccount (r:0 w:1) - /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:0) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:1) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1001 w:1000) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1000 w:1000) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionMetadataOf` (r:0 w:1) + /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:1) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// The range of component `m` is `[0, 1000]`. /// The range of component `c` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. fn destroy(_m: u32, _c: u32, a: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `32220 + a * (332 ±0)` - // Estimated: `2523990 + a * (2921 ±0)` - // Minimum execution time: 1_310_198_000 picoseconds. - Weight::from_parts(1_479_261_043, 2523990) - // Standard Error: 4_415 - .saturating_add(Weight::from_parts(6_016_212, 0).saturating_mul(a.into())) + // Measured: `32204 + a * (366 ±0)` + // Estimated: `2523990 + a * (2954 ±0)` + // Minimum execution time: 1_249_733_000 picoseconds. 
+ Weight::from_parts(1_293_703_849, 2523990) + // Standard Error: 4_764 + .saturating_add(Weight::from_parts(6_433_523, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(1004_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) .saturating_add(T::DbWeight::get().writes(1005_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) - .saturating_add(Weight::from_parts(0, 2921).saturating_mul(a.into())) - } - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:1) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) + .saturating_add(Weight::from_parts(0, 2954).saturating_mul(a.into())) + } + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) fn mint() -> Weight { // Proof Size summary in bytes: // Measured: `455` // Estimated: `4326` - // Minimum execution time: 51_910_000 picoseconds. - Weight::from_parts(53_441_000, 4326) + // Minimum execution time: 48_645_000 picoseconds. 
+ Weight::from_parts(50_287_000, 4326) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:1) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) fn force_mint() -> Weight { // Proof Size summary in bytes: // Measured: `455` // Estimated: `4326` - // Minimum execution time: 50_168_000 picoseconds. - Weight::from_parts(51_380_000, 4326) + // Minimum execution time: 46_688_000 picoseconds. 
+ Weight::from_parts(47_680_000, 4326) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts ItemMetadataOf (r:1 w:0) - /// Proof: Nfts ItemMetadataOf (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:1) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Nfts ItemPriceOf (r:0 w:1) - /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Nfts ItemAttributesApprovalsOf (r:0 w:1) - /// Proof: Nfts ItemAttributesApprovalsOf (max_values: None, max_size: Some(681), added: 3156, mode: MaxEncodedLen) - /// Storage: Nfts PendingSwapOf (r:0 w:1) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) + /// Storage: `Nfts::Attribute` (r:1 w:0) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:0) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:0 w:1) + /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(681), added: 3156, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) fn burn() -> Weight { // Proof Size summary in bytes: // Measured: `564` // Estimated: `4326` - // Minimum execution time: 50_738_000 picoseconds. - Weight::from_parts(51_850_000, 4326) - .saturating_add(T::DbWeight::get().reads(4_u64)) + // Minimum execution time: 51_771_000 picoseconds. 
+ Weight::from_parts(53_492_000, 4326) + .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1 w:0) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:2) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Nfts ItemPriceOf (r:0 w:1) - /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Nfts PendingSwapOf (r:0 w:1) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:0) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:2) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: // Measured: `593` // Estimated: `4326` - // Minimum execution time: 41_055_000 picoseconds. - Weight::from_parts(42_336_000, 4326) + // Minimum execution time: 39_166_000 picoseconds. 
+ Weight::from_parts(40_128_000, 4326) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:5000 w:5000) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:5000 w:5000) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `763 + i * (108 ±0)` // Estimated: `3549 + i * (3336 ±0)` - // Minimum execution time: 15_688_000 picoseconds. - Weight::from_parts(15_921_000, 3549) - // Standard Error: 14_827 - .saturating_add(Weight::from_parts(17_105_395, 0).saturating_mul(i.into())) + // Minimum execution time: 13_804_000 picoseconds. + Weight::from_parts(14_159_000, 3549) + // Standard Error: 13_812 + .saturating_add(Weight::from_parts(14_661_284, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) .saturating_add(Weight::from_parts(0, 3336).saturating_mul(i.into())) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn lock_item_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `435` // Estimated: `3534` - // Minimum execution time: 19_981_000 picoseconds. - Weight::from_parts(20_676_000, 3534) + // Minimum execution time: 17_485_000 picoseconds. 
+ Weight::from_parts(18_412_000, 3534) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn unlock_item_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `435` // Estimated: `3534` - // Minimum execution time: 19_911_000 picoseconds. - Weight::from_parts(20_612_000, 3534) + // Minimum execution time: 17_335_000 picoseconds. + Weight::from_parts(18_543_000, 3534) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn lock_collection() -> Weight { // Proof Size summary in bytes: // Measured: `340` // Estimated: `3549` - // Minimum execution time: 16_441_000 picoseconds. - Weight::from_parts(16_890_000, 3549) + // Minimum execution time: 14_608_000 picoseconds. + Weight::from_parts(15_696_000, 3549) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts OwnershipAcceptance (r:1 w:1) - /// Proof: Nfts OwnershipAcceptance (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionAccount (r:0 w:2) - /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Nfts::OwnershipAcceptance` (r:1 w:1) + /// Proof: `Nfts::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:2) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn transfer_ownership() -> Weight { // Proof Size summary in bytes: - // Measured: `388` - // Estimated: `3549` - // Minimum execution time: 22_610_000 picoseconds. 
- Weight::from_parts(23_422_000, 3549) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + // Measured: `562` + // Estimated: `3593` + // Minimum execution time: 25_686_000 picoseconds. + Weight::from_parts(26_433_000, 3593) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:2 w:4) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:2 w:4) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn set_team() -> Weight { // Proof Size summary in bytes: // Measured: `369` // Estimated: `6078` - // Minimum execution time: 39_739_000 picoseconds. - Weight::from_parts(41_306_000, 6078) + // Minimum execution time: 37_192_000 picoseconds. + Weight::from_parts(38_561_000, 6078) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionAccount (r:0 w:2) - /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:2) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn force_collection_owner() -> Weight { // Proof Size summary in bytes: // Measured: `311` // Estimated: `3549` - // Minimum execution time: 17_685_000 picoseconds. - Weight::from_parts(18_258_000, 3549) + // Minimum execution time: 15_401_000 picoseconds. + Weight::from_parts(15_826_000, 3549) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:0 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn force_collection_config() -> Weight { // Proof Size summary in bytes: // Measured: `276` // Estimated: `3549` - // Minimum execution time: 13_734_000 picoseconds. - Weight::from_parts(14_337_000, 3549) + // Minimum execution time: 11_683_000 picoseconds. 
+ Weight::from_parts(12_255_000, 3549) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn lock_item_properties() -> Weight { // Proof Size summary in bytes: // Measured: `435` // Estimated: `3534` - // Minimum execution time: 19_269_000 picoseconds. - Weight::from_parts(19_859_000, 3534) + // Minimum execution time: 16_899_000 picoseconds. + Weight::from_parts(17_404_000, 3534) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1 w:1) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) fn set_attribute() -> Weight { // Proof Size summary in bytes: // Measured: `539` - // Estimated: `3911` - // Minimum execution time: 51_540_000 picoseconds. - Weight::from_parts(52_663_000, 3911) + // Estimated: `3944` + // Minimum execution time: 46_768_000 picoseconds. 
+ Weight::from_parts(47_834_000, 3944) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1 w:1) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) fn force_set_attribute() -> Weight { // Proof Size summary in bytes: // Measured: `344` - // Estimated: `3911` - // Minimum execution time: 26_529_000 picoseconds. - Weight::from_parts(27_305_000, 3911) + // Estimated: `3944` + // Minimum execution time: 23_356_000 picoseconds. + Weight::from_parts(24_528_000, 3944) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Nfts Attribute (r:1 w:1) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) fn clear_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `950` - // Estimated: `3911` - // Minimum execution time: 46_951_000 picoseconds. - Weight::from_parts(48_481_000, 3911) + // Measured: `983` + // Estimated: `3944` + // Minimum execution time: 43_061_000 picoseconds. 
+ Weight::from_parts(44_024_000, 3944) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Nfts Item (r:1 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts ItemAttributesApprovalsOf (r:1 w:1) - /// Proof: Nfts ItemAttributesApprovalsOf (max_values: None, max_size: Some(681), added: 3156, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) + /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(681), added: 3156, mode: `MaxEncodedLen`) fn approve_item_attributes() -> Weight { // Proof Size summary in bytes: // Measured: `381` // Estimated: `4326` - // Minimum execution time: 17_222_000 picoseconds. - Weight::from_parts(17_819_000, 4326) + // Minimum execution time: 14_929_000 picoseconds. + Weight::from_parts(15_344_000, 4326) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:1 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts ItemAttributesApprovalsOf (r:1 w:1) - /// Proof: Nfts ItemAttributesApprovalsOf (max_values: None, max_size: Some(681), added: 3156, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1001 w:1000) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) + /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(681), added: 3156, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1001 w:1000) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. fn cancel_item_attributes_approval(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `837 + n * (364 ±0)` - // Estimated: `4326 + n * (2921 ±0)` - // Minimum execution time: 26_185_000 picoseconds. - Weight::from_parts(27_038_000, 4326) - // Standard Error: 2_378 - .saturating_add(Weight::from_parts(6_067_888, 0).saturating_mul(n.into())) + // Measured: `831 + n * (398 ±0)` + // Estimated: `4326 + n * (2954 ±0)` + // Minimum execution time: 23_707_000 picoseconds. 
+ Weight::from_parts(24_688_000, 4326) + // Standard Error: 3_813 + .saturating_add(Weight::from_parts(6_422_256, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2921).saturating_mul(n.into())) - } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemMetadataOf (r:1 w:1) - /// Proof: Nfts ItemMetadataOf (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) + } + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) fn set_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `539` - // Estimated: `3605` - // Minimum execution time: 42_120_000 picoseconds. - Weight::from_parts(43_627_000, 3605) + // Estimated: `3812` + // Minimum execution time: 37_882_000 picoseconds. 
+ Weight::from_parts(39_222_000, 3812) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts ItemMetadataOf (r:1 w:1) - /// Proof: Nfts ItemMetadataOf (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `642` - // Estimated: `3605` - // Minimum execution time: 40_732_000 picoseconds. - Weight::from_parts(42_760_000, 3605) + // Measured: `849` + // Estimated: `3812` + // Minimum execution time: 36_629_000 picoseconds. + Weight::from_parts(37_351_000, 3812) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionMetadataOf (r:1 w:1) - /// Proof: Nfts CollectionMetadataOf (max_values: None, max_size: Some(87), added: 2562, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionMetadataOf` (r:1 w:1) + /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) fn set_collection_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `398` - // Estimated: `3552` - // Minimum execution time: 39_443_000 picoseconds. - Weight::from_parts(40_482_000, 3552) + // Estimated: `3759` + // Minimum execution time: 34_853_000 picoseconds. 
+ Weight::from_parts(35_914_000, 3759) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts CollectionMetadataOf (r:1 w:1) - /// Proof: Nfts CollectionMetadataOf (max_values: None, max_size: Some(87), added: 2562, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionMetadataOf` (r:1 w:1) + /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) fn clear_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `509` - // Estimated: `3552` - // Minimum execution time: 37_676_000 picoseconds. - Weight::from_parts(39_527_000, 3552) + // Measured: `716` + // Estimated: `3759` + // Minimum execution time: 33_759_000 picoseconds. + Weight::from_parts(34_729_000, 3759) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn approve_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `410` // Estimated: `4326` - // Minimum execution time: 20_787_000 picoseconds. - Weight::from_parts(21_315_000, 4326) + // Minimum execution time: 17_583_000 picoseconds. + Weight::from_parts(18_675_000, 4326) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) fn cancel_approval() -> Weight { // Proof Size summary in bytes: // Measured: `418` // Estimated: `4326` - // Minimum execution time: 18_200_000 picoseconds. - Weight::from_parts(19_064_000, 4326) + // Minimum execution time: 15_036_000 picoseconds. 
+ Weight::from_parts(15_995_000, 4326) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) fn clear_all_transfer_approvals() -> Weight { // Proof Size summary in bytes: // Measured: `418` // Estimated: `4326` - // Minimum execution time: 17_128_000 picoseconds. - Weight::from_parts(17_952_000, 4326) + // Minimum execution time: 14_666_000 picoseconds. + Weight::from_parts(15_152_000, 4326) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts OwnershipAcceptance (r:1 w:1) - /// Proof: Nfts OwnershipAcceptance (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `Nfts::OwnershipAcceptance` (r:1 w:1) + /// Proof: `Nfts::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn set_accept_ownership() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3517` - // Minimum execution time: 14_667_000 picoseconds. - Weight::from_parts(15_262_000, 3517) + // Minimum execution time: 12_393_000 picoseconds. + Weight::from_parts(12_895_000, 3517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts CollectionConfigOf (r:1 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) fn set_collection_max_supply() -> Weight { // Proof Size summary in bytes: // Measured: `340` // Estimated: `3549` - // Minimum execution time: 18_435_000 picoseconds. - Weight::from_parts(18_775_000, 3549) + // Minimum execution time: 16_034_000 picoseconds. + Weight::from_parts(16_617_000, 3549) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn update_mint_settings() -> Weight { // Proof Size summary in bytes: // Measured: `323` // Estimated: `3538` - // Minimum execution time: 18_125_000 picoseconds. - Weight::from_parts(18_415_000, 3538) + // Minimum execution time: 15_812_000 picoseconds. 
+ Weight::from_parts(16_644_000, 3538) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:1 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts ItemPriceOf (r:0 w:1) - /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn set_price() -> Weight { // Proof Size summary in bytes: // Measured: `518` // Estimated: `4326` - // Minimum execution time: 23_237_000 picoseconds. - Weight::from_parts(24_128_000, 4326) + // Minimum execution time: 21_650_000 picoseconds. + Weight::from_parts(22_443_000, 4326) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts ItemPriceOf (r:1 w:1) - /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1 w:0) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:2) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Nfts PendingSwapOf (r:0 w:1) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:1 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:0) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) 
+ /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:2) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) fn buy_item() -> Weight { // Proof Size summary in bytes: // Measured: `705` // Estimated: `4326` - // Minimum execution time: 53_291_000 picoseconds. - Weight::from_parts(54_614_000, 4326) + // Minimum execution time: 49_463_000 picoseconds. + Weight::from_parts(50_937_000, 4326) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -658,681 +662,685 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_192_000 picoseconds. - Weight::from_parts(4_039_901, 0) - // Standard Error: 10_309 - .saturating_add(Weight::from_parts(3_934_017, 0).saturating_mul(n.into())) - } - /// Storage: Nfts Item (r:2 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts PendingSwapOf (r:0 w:1) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) + // Minimum execution time: 2_029_000 picoseconds. + Weight::from_parts(3_749_829, 0) + // Standard Error: 8_497 + .saturating_add(Weight::from_parts(1_913_514, 0).saturating_mul(n.into())) + } + /// Storage: `Nfts::Item` (r:2 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) fn create_swap() -> Weight { // Proof Size summary in bytes: // Measured: `494` // Estimated: `7662` - // Minimum execution time: 21_011_000 picoseconds. - Weight::from_parts(22_065_000, 7662) + // Minimum execution time: 18_181_000 picoseconds. + Weight::from_parts(18_698_000, 7662) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts PendingSwapOf (r:1 w:1) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) + /// Storage: `Nfts::PendingSwapOf` (r:1 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) fn cancel_swap() -> Weight { // Proof Size summary in bytes: // Measured: `513` // Estimated: `4326` - // Minimum execution time: 21_423_000 picoseconds. - Weight::from_parts(21_743_000, 4326) + // Minimum execution time: 18_228_000 picoseconds. 
+ Weight::from_parts(18_940_000, 4326) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:2 w:2) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts PendingSwapOf (r:1 w:2) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:2 w:0) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:2 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:4) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Nfts ItemPriceOf (r:0 w:2) - /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:2 w:2) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:1 w:2) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:2 w:0) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:2 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:4) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:2) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn claim_swap() -> Weight { // Proof Size summary in bytes: // Measured: `834` // Estimated: `7662` - // Minimum execution time: 86_059_000 picoseconds. - Weight::from_parts(88_401_000, 7662) + // Minimum execution time: 77_983_000 picoseconds. 
+ Weight::from_parts(79_887_000, 7662) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(10_u64)) } - /// Storage: Nfts CollectionRoleOf (r:2 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:10 w:10) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts ItemMetadataOf (r:1 w:1) - /// Proof: Nfts ItemMetadataOf (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:1) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:2 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:10 w:10) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 10]`. fn mint_pre_signed(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `629` - // Estimated: `6078 + n * (2921 ±0)` - // Minimum execution time: 146_746_000 picoseconds. - Weight::from_parts(152_885_862, 6078) - // Standard Error: 40_442 - .saturating_add(Weight::from_parts(32_887_800, 0).saturating_mul(n.into())) + // Estimated: `6078 + n * (2954 ±0)` + // Minimum execution time: 126_998_000 picoseconds. 
+ Weight::from_parts(134_149_389, 6078) + // Standard Error: 33_180 + .saturating_add(Weight::from_parts(30_711_206, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2921).saturating_mul(n.into())) - } - /// Storage: Nfts Item (r:1 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts ItemAttributesApprovalsOf (r:1 w:1) - /// Proof: Nfts ItemAttributesApprovalsOf (max_values: None, max_size: Some(681), added: 3156, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:10 w:10) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) + } + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) + /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(681), added: 3156, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:10 w:10) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 10]`. fn set_attributes_pre_signed(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `659` - // Estimated: `4326 + n * (2921 ±0)` - // Minimum execution time: 83_960_000 picoseconds. - Weight::from_parts(98_609_885, 4326) - // Standard Error: 85_991 - .saturating_add(Weight::from_parts(32_633_495, 0).saturating_mul(n.into())) + // Estimated: `4326 + n * (2954 ±0)` + // Minimum execution time: 66_213_000 picoseconds. + Weight::from_parts(81_661_819, 4326) + // Standard Error: 87_003 + .saturating_add(Weight::from_parts(29_550_476, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2921).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: Nfts NextCollectionId (r:1 w:1) - /// Proof: Nfts NextCollectionId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:0 w:1) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:0 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts CollectionAccount (r:0 w:1) - /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Nfts::NextCollectionId` (r:1 w:1) + /// Proof: `Nfts::NextCollectionId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:0 w:1) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:1) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: // Measured: `216` // Estimated: `3549` - // Minimum execution time: 40_489_000 picoseconds. - Weight::from_parts(41_320_000, 3549) + // Minimum execution time: 34_035_000 picoseconds. 
+ Weight::from_parts(35_228_000, 3549) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Nfts NextCollectionId (r:1 w:1) - /// Proof: Nfts NextCollectionId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:0 w:1) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:0 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts CollectionAccount (r:0 w:1) - /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Nfts::NextCollectionId` (r:1 w:1) + /// Proof: `Nfts::NextCollectionId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:0 w:1) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:1) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn force_create() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3549` - // Minimum execution time: 23_257_000 picoseconds. - Weight::from_parts(23_770_000, 3549) + // Minimum execution time: 19_430_000 picoseconds. 
+ Weight::from_parts(20_054_000, 3549) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts ItemMetadataOf (r:1 w:0) - /// Proof: Nfts ItemMetadataOf (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:1 w:1) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1001 w:1000) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1000 w:1000) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts CollectionMetadataOf (r:0 w:1) - /// Proof: Nfts CollectionMetadataOf (max_values: None, max_size: Some(87), added: 2562, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:0 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts CollectionAccount (r:0 w:1) - /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:0) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:1) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1001 w:1000) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1000 w:1000) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionMetadataOf` (r:0 w:1) + /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:1) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// The range of component `m` is `[0, 1000]`. /// The range of component `c` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. fn destroy(_m: u32, _c: u32, a: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `32220 + a * (332 ±0)` - // Estimated: `2523990 + a * (2921 ±0)` - // Minimum execution time: 1_310_198_000 picoseconds. - Weight::from_parts(1_479_261_043, 2523990) - // Standard Error: 4_415 - .saturating_add(Weight::from_parts(6_016_212, 0).saturating_mul(a.into())) + // Measured: `32204 + a * (366 ±0)` + // Estimated: `2523990 + a * (2954 ±0)` + // Minimum execution time: 1_249_733_000 picoseconds. 
+ Weight::from_parts(1_293_703_849, 2523990) + // Standard Error: 4_764 + .saturating_add(Weight::from_parts(6_433_523, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(1004_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(a.into()))) .saturating_add(RocksDbWeight::get().writes(1005_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(a.into()))) - .saturating_add(Weight::from_parts(0, 2921).saturating_mul(a.into())) - } - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:1) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) + .saturating_add(Weight::from_parts(0, 2954).saturating_mul(a.into())) + } + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) fn mint() -> Weight { // Proof Size summary in bytes: // Measured: `455` // Estimated: `4326` - // Minimum execution time: 51_910_000 picoseconds. - Weight::from_parts(53_441_000, 4326) + // Minimum execution time: 48_645_000 picoseconds. 
+ Weight::from_parts(50_287_000, 4326) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:1) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) fn force_mint() -> Weight { // Proof Size summary in bytes: // Measured: `455` // Estimated: `4326` - // Minimum execution time: 50_168_000 picoseconds. - Weight::from_parts(51_380_000, 4326) + // Minimum execution time: 46_688_000 picoseconds. 
+ Weight::from_parts(47_680_000, 4326) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts ItemMetadataOf (r:1 w:0) - /// Proof: Nfts ItemMetadataOf (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:1) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Nfts ItemPriceOf (r:0 w:1) - /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Nfts ItemAttributesApprovalsOf (r:0 w:1) - /// Proof: Nfts ItemAttributesApprovalsOf (max_values: None, max_size: Some(681), added: 3156, mode: MaxEncodedLen) - /// Storage: Nfts PendingSwapOf (r:0 w:1) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) + /// Storage: `Nfts::Attribute` (r:1 w:0) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:0) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:0 w:1) + /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(681), added: 3156, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) fn burn() -> Weight { // Proof Size summary in bytes: // Measured: `564` // Estimated: `4326` - // Minimum execution time: 50_738_000 picoseconds. - Weight::from_parts(51_850_000, 4326) - .saturating_add(RocksDbWeight::get().reads(4_u64)) + // Minimum execution time: 51_771_000 picoseconds. 
+ Weight::from_parts(53_492_000, 4326) + .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1 w:0) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:2) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Nfts ItemPriceOf (r:0 w:1) - /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Nfts PendingSwapOf (r:0 w:1) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:0) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:2) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: // Measured: `593` // Estimated: `4326` - // Minimum execution time: 41_055_000 picoseconds. - Weight::from_parts(42_336_000, 4326) + // Minimum execution time: 39_166_000 picoseconds. 
+ Weight::from_parts(40_128_000, 4326) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:5000 w:5000) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:5000 w:5000) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `763 + i * (108 ±0)` // Estimated: `3549 + i * (3336 ±0)` - // Minimum execution time: 15_688_000 picoseconds. - Weight::from_parts(15_921_000, 3549) - // Standard Error: 14_827 - .saturating_add(Weight::from_parts(17_105_395, 0).saturating_mul(i.into())) + // Minimum execution time: 13_804_000 picoseconds. + Weight::from_parts(14_159_000, 3549) + // Standard Error: 13_812 + .saturating_add(Weight::from_parts(14_661_284, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(i.into()))) .saturating_add(Weight::from_parts(0, 3336).saturating_mul(i.into())) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn lock_item_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `435` // Estimated: `3534` - // Minimum execution time: 19_981_000 picoseconds. - Weight::from_parts(20_676_000, 3534) + // Minimum execution time: 17_485_000 picoseconds. 
+ Weight::from_parts(18_412_000, 3534) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn unlock_item_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `435` // Estimated: `3534` - // Minimum execution time: 19_911_000 picoseconds. - Weight::from_parts(20_612_000, 3534) + // Minimum execution time: 17_335_000 picoseconds. + Weight::from_parts(18_543_000, 3534) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn lock_collection() -> Weight { // Proof Size summary in bytes: // Measured: `340` // Estimated: `3549` - // Minimum execution time: 16_441_000 picoseconds. - Weight::from_parts(16_890_000, 3549) + // Minimum execution time: 14_608_000 picoseconds. + Weight::from_parts(15_696_000, 3549) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts OwnershipAcceptance (r:1 w:1) - /// Proof: Nfts OwnershipAcceptance (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionAccount (r:0 w:2) - /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Nfts::OwnershipAcceptance` (r:1 w:1) + /// Proof: `Nfts::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:2) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn transfer_ownership() -> Weight { // Proof Size summary in bytes: - // Measured: `388` - // Estimated: `3549` - // Minimum execution time: 22_610_000 picoseconds. 
- Weight::from_parts(23_422_000, 3549) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + // Measured: `562` + // Estimated: `3593` + // Minimum execution time: 25_686_000 picoseconds. + Weight::from_parts(26_433_000, 3593) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:2 w:4) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:2 w:4) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn set_team() -> Weight { // Proof Size summary in bytes: // Measured: `369` // Estimated: `6078` - // Minimum execution time: 39_739_000 picoseconds. - Weight::from_parts(41_306_000, 6078) + // Minimum execution time: 37_192_000 picoseconds. + Weight::from_parts(38_561_000, 6078) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionAccount (r:0 w:2) - /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:2) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn force_collection_owner() -> Weight { // Proof Size summary in bytes: // Measured: `311` // Estimated: `3549` - // Minimum execution time: 17_685_000 picoseconds. - Weight::from_parts(18_258_000, 3549) + // Minimum execution time: 15_401_000 picoseconds. + Weight::from_parts(15_826_000, 3549) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:0 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn force_collection_config() -> Weight { // Proof Size summary in bytes: // Measured: `276` // Estimated: `3549` - // Minimum execution time: 13_734_000 picoseconds. - Weight::from_parts(14_337_000, 3549) + // Minimum execution time: 11_683_000 picoseconds. 
+ Weight::from_parts(12_255_000, 3549) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn lock_item_properties() -> Weight { // Proof Size summary in bytes: // Measured: `435` // Estimated: `3534` - // Minimum execution time: 19_269_000 picoseconds. - Weight::from_parts(19_859_000, 3534) + // Minimum execution time: 16_899_000 picoseconds. + Weight::from_parts(17_404_000, 3534) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1 w:1) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) fn set_attribute() -> Weight { // Proof Size summary in bytes: // Measured: `539` - // Estimated: `3911` - // Minimum execution time: 51_540_000 picoseconds. - Weight::from_parts(52_663_000, 3911) + // Estimated: `3944` + // Minimum execution time: 46_768_000 picoseconds. 
+ Weight::from_parts(47_834_000, 3944) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1 w:1) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) fn force_set_attribute() -> Weight { // Proof Size summary in bytes: // Measured: `344` - // Estimated: `3911` - // Minimum execution time: 26_529_000 picoseconds. - Weight::from_parts(27_305_000, 3911) + // Estimated: `3944` + // Minimum execution time: 23_356_000 picoseconds. + Weight::from_parts(24_528_000, 3944) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Nfts Attribute (r:1 w:1) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) fn clear_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `950` - // Estimated: `3911` - // Minimum execution time: 46_951_000 picoseconds. - Weight::from_parts(48_481_000, 3911) + // Measured: `983` + // Estimated: `3944` + // Minimum execution time: 43_061_000 picoseconds. 
+ Weight::from_parts(44_024_000, 3944) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Nfts Item (r:1 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts ItemAttributesApprovalsOf (r:1 w:1) - /// Proof: Nfts ItemAttributesApprovalsOf (max_values: None, max_size: Some(681), added: 3156, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) + /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(681), added: 3156, mode: `MaxEncodedLen`) fn approve_item_attributes() -> Weight { // Proof Size summary in bytes: // Measured: `381` // Estimated: `4326` - // Minimum execution time: 17_222_000 picoseconds. - Weight::from_parts(17_819_000, 4326) + // Minimum execution time: 14_929_000 picoseconds. + Weight::from_parts(15_344_000, 4326) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:1 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts ItemAttributesApprovalsOf (r:1 w:1) - /// Proof: Nfts ItemAttributesApprovalsOf (max_values: None, max_size: Some(681), added: 3156, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1001 w:1000) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) + /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(681), added: 3156, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1001 w:1000) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. fn cancel_item_attributes_approval(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `837 + n * (364 ±0)` - // Estimated: `4326 + n * (2921 ±0)` - // Minimum execution time: 26_185_000 picoseconds. - Weight::from_parts(27_038_000, 4326) - // Standard Error: 2_378 - .saturating_add(Weight::from_parts(6_067_888, 0).saturating_mul(n.into())) + // Measured: `831 + n * (398 ±0)` + // Estimated: `4326 + n * (2954 ±0)` + // Minimum execution time: 23_707_000 picoseconds. 
+ Weight::from_parts(24_688_000, 4326) + // Standard Error: 3_813 + .saturating_add(Weight::from_parts(6_422_256, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2921).saturating_mul(n.into())) - } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemMetadataOf (r:1 w:1) - /// Proof: Nfts ItemMetadataOf (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) + } + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) fn set_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `539` - // Estimated: `3605` - // Minimum execution time: 42_120_000 picoseconds. - Weight::from_parts(43_627_000, 3605) + // Estimated: `3812` + // Minimum execution time: 37_882_000 picoseconds. 
+ Weight::from_parts(39_222_000, 3812) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts ItemMetadataOf (r:1 w:1) - /// Proof: Nfts ItemMetadataOf (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `642` - // Estimated: `3605` - // Minimum execution time: 40_732_000 picoseconds. - Weight::from_parts(42_760_000, 3605) + // Measured: `849` + // Estimated: `3812` + // Minimum execution time: 36_629_000 picoseconds. + Weight::from_parts(37_351_000, 3812) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionMetadataOf (r:1 w:1) - /// Proof: Nfts CollectionMetadataOf (max_values: None, max_size: Some(87), added: 2562, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionMetadataOf` (r:1 w:1) + /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) fn set_collection_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `398` - // Estimated: `3552` - // Minimum execution time: 39_443_000 picoseconds. - Weight::from_parts(40_482_000, 3552) + // Estimated: `3759` + // Minimum execution time: 34_853_000 picoseconds. 
+ Weight::from_parts(35_914_000, 3759) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts CollectionMetadataOf (r:1 w:1) - /// Proof: Nfts CollectionMetadataOf (max_values: None, max_size: Some(87), added: 2562, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionMetadataOf` (r:1 w:1) + /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) fn clear_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `509` - // Estimated: `3552` - // Minimum execution time: 37_676_000 picoseconds. - Weight::from_parts(39_527_000, 3552) + // Measured: `716` + // Estimated: `3759` + // Minimum execution time: 33_759_000 picoseconds. + Weight::from_parts(34_729_000, 3759) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn approve_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `410` // Estimated: `4326` - // Minimum execution time: 20_787_000 picoseconds. - Weight::from_parts(21_315_000, 4326) + // Minimum execution time: 17_583_000 picoseconds. + Weight::from_parts(18_675_000, 4326) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) fn cancel_approval() -> Weight { // Proof Size summary in bytes: // Measured: `418` // Estimated: `4326` - // Minimum execution time: 18_200_000 picoseconds. - Weight::from_parts(19_064_000, 4326) + // Minimum execution time: 15_036_000 picoseconds. 
+ Weight::from_parts(15_995_000, 4326) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) fn clear_all_transfer_approvals() -> Weight { // Proof Size summary in bytes: // Measured: `418` // Estimated: `4326` - // Minimum execution time: 17_128_000 picoseconds. - Weight::from_parts(17_952_000, 4326) + // Minimum execution time: 14_666_000 picoseconds. + Weight::from_parts(15_152_000, 4326) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts OwnershipAcceptance (r:1 w:1) - /// Proof: Nfts OwnershipAcceptance (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `Nfts::OwnershipAcceptance` (r:1 w:1) + /// Proof: `Nfts::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn set_accept_ownership() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3517` - // Minimum execution time: 14_667_000 picoseconds. - Weight::from_parts(15_262_000, 3517) + // Minimum execution time: 12_393_000 picoseconds. + Weight::from_parts(12_895_000, 3517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts CollectionConfigOf (r:1 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) fn set_collection_max_supply() -> Weight { // Proof Size summary in bytes: // Measured: `340` // Estimated: `3549` - // Minimum execution time: 18_435_000 picoseconds. - Weight::from_parts(18_775_000, 3549) + // Minimum execution time: 16_034_000 picoseconds. + Weight::from_parts(16_617_000, 3549) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn update_mint_settings() -> Weight { // Proof Size summary in bytes: // Measured: `323` // Estimated: `3538` - // Minimum execution time: 18_125_000 picoseconds. - Weight::from_parts(18_415_000, 3538) + // Minimum execution time: 15_812_000 picoseconds. 
+ Weight::from_parts(16_644_000, 3538) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:1 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts ItemPriceOf (r:0 w:1) - /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn set_price() -> Weight { // Proof Size summary in bytes: // Measured: `518` // Estimated: `4326` - // Minimum execution time: 23_237_000 picoseconds. - Weight::from_parts(24_128_000, 4326) + // Minimum execution time: 21_650_000 picoseconds. + Weight::from_parts(22_443_000, 4326) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts ItemPriceOf (r:1 w:1) - /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1 w:0) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:2) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Nfts PendingSwapOf (r:0 w:1) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:1 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:0) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` 
(r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:2) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) fn buy_item() -> Weight { // Proof Size summary in bytes: // Measured: `705` // Estimated: `4326` - // Minimum execution time: 53_291_000 picoseconds. - Weight::from_parts(54_614_000, 4326) + // Minimum execution time: 49_463_000 picoseconds. + Weight::from_parts(50_937_000, 4326) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -1341,120 +1349,120 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_192_000 picoseconds. - Weight::from_parts(4_039_901, 0) - // Standard Error: 10_309 - .saturating_add(Weight::from_parts(3_934_017, 0).saturating_mul(n.into())) - } - /// Storage: Nfts Item (r:2 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts PendingSwapOf (r:0 w:1) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) + // Minimum execution time: 2_029_000 picoseconds. + Weight::from_parts(3_749_829, 0) + // Standard Error: 8_497 + .saturating_add(Weight::from_parts(1_913_514, 0).saturating_mul(n.into())) + } + /// Storage: `Nfts::Item` (r:2 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) fn create_swap() -> Weight { // Proof Size summary in bytes: // Measured: `494` // Estimated: `7662` - // Minimum execution time: 21_011_000 picoseconds. - Weight::from_parts(22_065_000, 7662) + // Minimum execution time: 18_181_000 picoseconds. + Weight::from_parts(18_698_000, 7662) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts PendingSwapOf (r:1 w:1) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) + /// Storage: `Nfts::PendingSwapOf` (r:1 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) fn cancel_swap() -> Weight { // Proof Size summary in bytes: // Measured: `513` // Estimated: `4326` - // Minimum execution time: 21_423_000 picoseconds. - Weight::from_parts(21_743_000, 4326) + // Minimum execution time: 18_228_000 picoseconds. 
+ Weight::from_parts(18_940_000, 4326) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:2 w:2) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts PendingSwapOf (r:1 w:2) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:2 w:0) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:2 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:4) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Nfts ItemPriceOf (r:0 w:2) - /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:2 w:2) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:1 w:2) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:2 w:0) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:2 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:4) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:2) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn claim_swap() -> Weight { // Proof Size summary in bytes: // Measured: `834` // Estimated: `7662` - // Minimum execution time: 86_059_000 picoseconds. - Weight::from_parts(88_401_000, 7662) + // Minimum execution time: 77_983_000 picoseconds. 
+ Weight::from_parts(79_887_000, 7662) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(10_u64)) } - /// Storage: Nfts CollectionRoleOf (r:2 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:10 w:10) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts ItemMetadataOf (r:1 w:1) - /// Proof: Nfts ItemMetadataOf (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:1) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:2 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:10 w:10) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 10]`. fn mint_pre_signed(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `629` - // Estimated: `6078 + n * (2921 ±0)` - // Minimum execution time: 146_746_000 picoseconds. - Weight::from_parts(152_885_862, 6078) - // Standard Error: 40_442 - .saturating_add(Weight::from_parts(32_887_800, 0).saturating_mul(n.into())) + // Estimated: `6078 + n * (2954 ±0)` + // Minimum execution time: 126_998_000 picoseconds. 
+ Weight::from_parts(134_149_389, 6078) + // Standard Error: 33_180 + .saturating_add(Weight::from_parts(30_711_206, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2921).saturating_mul(n.into())) - } - /// Storage: Nfts Item (r:1 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts ItemAttributesApprovalsOf (r:1 w:1) - /// Proof: Nfts ItemAttributesApprovalsOf (max_values: None, max_size: Some(681), added: 3156, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:10 w:10) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) + } + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) + /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(681), added: 3156, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:10 w:10) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 10]`. fn set_attributes_pre_signed(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `659` - // Estimated: `4326 + n * (2921 ±0)` - // Minimum execution time: 83_960_000 picoseconds. - Weight::from_parts(98_609_885, 4326) - // Standard Error: 85_991 - .saturating_add(Weight::from_parts(32_633_495, 0).saturating_mul(n.into())) + // Estimated: `4326 + n * (2954 ±0)` + // Minimum execution time: 66_213_000 picoseconds. 
+ Weight::from_parts(81_661_819, 4326) + // Standard Error: 87_003 + .saturating_add(Weight::from_parts(29_550_476, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2921).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) } } diff --git a/substrate/frame/nis/src/weights.rs b/substrate/frame/nis/src/weights.rs index cba2f0049055b8f3e69aeba7d7d572ba19030801..6390827139119eaa2e98192313b7702766d0ccd1 100644 --- a/substrate/frame/nis/src/weights.rs +++ b/substrate/frame/nis/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_nis +//! Autogenerated weights for `pallet_nis` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/nis/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/nis/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_nis. +/// Weight functions needed for `pallet_nis`. pub trait WeightInfo { fn place_bid(l: u32, ) -> Weight; fn place_bid_max() -> Weight; @@ -65,367 +64,367 @@ pub trait WeightInfo { fn process_bid() -> Weight; } -/// Weights for pallet_nis using the Substrate node and recommended hardware. +/// Weights for `pallet_nis` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight<T>(PhantomData<T>); impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { - /// Storage: Nis Queues (r:1 w:1) - /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) - /// Storage: Nis QueueTotals (r:1 w:1) - /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) + /// Storage: `Nis::Queues` (r:1 w:1) + /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Nis::QueueTotals` (r:1 w:1) + /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 999]`. fn place_bid(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6176 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 49_410_000 picoseconds. - Weight::from_parts(57_832_282, 51487) - // Standard Error: 288 - .saturating_add(Weight::from_parts(51_621, 0).saturating_mul(l.into())) + // Minimum execution time: 47_908_000 picoseconds. + Weight::from_parts(50_096_676, 51487) + // Standard Error: 208 + .saturating_add(Weight::from_parts(41_318, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Nis Queues (r:1 w:1) - /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) - /// Storage: Nis QueueTotals (r:1 w:1) - /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) + /// Storage: `Nis::Queues` (r:1 w:1) + /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Nis::QueueTotals` (r:1 w:1) + /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) fn place_bid_max() -> Weight { // Proof Size summary in bytes: // Measured: `54178` // Estimated: `51487` - // Minimum execution time: 119_696_000 picoseconds.
+ Weight::from_parts(102_497_000, 51487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Nis Queues (r:1 w:1) - /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) - /// Storage: Nis QueueTotals (r:1 w:1) - /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) + /// Storage: `Nis::Queues` (r:1 w:1) + /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Nis::QueueTotals` (r:1 w:1) + /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) /// The range of component `l` is `[1, 1000]`. fn retract_bid(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6176 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 50_843_000 picoseconds. - Weight::from_parts(54_237_365, 51487) - // Standard Error: 243 - .saturating_add(Weight::from_parts(67_732, 0).saturating_mul(l.into())) + // Minimum execution time: 45_830_000 picoseconds. + Weight::from_parts(46_667_676, 51487) + // Standard Error: 130 + .saturating_add(Weight::from_parts(33_007, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Nis Summary (r:1 w:0) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Nis::Summary` (r:1 w:0) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn fund_deficit() -> Weight { // Proof Size summary in bytes: // Measured: `191` // Estimated: `3593` - // Minimum execution time: 40_752_000 picoseconds. - Weight::from_parts(41_899_000, 3593) + // Minimum execution time: 30_440_000 picoseconds. 
+ Weight::from_parts(31_240_000, 3593) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn communify() -> Weight { // Proof Size summary in bytes: // Measured: `668` // Estimated: `3675` - // Minimum execution time: 79_779_000 picoseconds. - Weight::from_parts(82_478_000, 3675) + // Minimum execution time: 71_017_000 picoseconds. 
+ Weight::from_parts(72_504_000, 3675) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn privatize() -> Weight { // Proof Size summary in bytes: // Measured: `829` // Estimated: `3675` - // Minimum execution time: 99_588_000 picoseconds. - Weight::from_parts(102_340_000, 3675) + // Minimum execution time: 89_138_000 picoseconds. 
+ Weight::from_parts(91_290_000, 3675) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn thaw_private() -> Weight { // Proof Size summary in bytes: // Measured: `354` - // Estimated: `3593` - // Minimum execution time: 53_094_000 picoseconds. - Weight::from_parts(54_543_000, 3593) + // Estimated: `3658` + // Minimum execution time: 47_917_000 picoseconds. + Weight::from_parts(49_121_000, 3658) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn thaw_communal() -> Weight { // Proof Size summary in bytes: // Measured: `773` // Estimated: `3675` - // Minimum execution time: 107_248_000 picoseconds. - Weight::from_parts(109_923_000, 3675) + // Minimum execution time: 91_320_000 picoseconds. 
+ Weight::from_parts(93_080_000, 3675) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Nis QueueTotals (r:1 w:1) - /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Nis::QueueTotals` (r:1 w:1) + /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) fn process_queues() -> Weight { // Proof Size summary in bytes: // Measured: `6624` // Estimated: `7487` - // Minimum execution time: 27_169_000 picoseconds. - Weight::from_parts(29_201_000, 7487) + // Minimum execution time: 20_117_000 picoseconds. + Weight::from_parts(20_829_000, 7487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Nis Queues (r:1 w:1) - /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) + /// Storage: `Nis::Queues` (r:1 w:1) + /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) fn process_queue() -> Weight { // Proof Size summary in bytes: // Measured: `42` // Estimated: `51487` - // Minimum execution time: 4_540_000 picoseconds. - Weight::from_parts(4_699_000, 51487) + // Minimum execution time: 4_460_000 picoseconds. + Weight::from_parts(4_797_000, 51487) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nis Receipts (r:0 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:0 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) fn process_bid() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_085_000 picoseconds. - Weight::from_parts(7_336_000, 0) + // Minimum execution time: 4_609_000 picoseconds. + Weight::from_parts(4_834_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: Nis Queues (r:1 w:1) - /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) - /// Storage: Nis QueueTotals (r:1 w:1) - /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) + /// Storage: `Nis::Queues` (r:1 w:1) + /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Nis::QueueTotals` (r:1 w:1) + /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 999]`. fn place_bid(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6176 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 49_410_000 picoseconds. - Weight::from_parts(57_832_282, 51487) - // Standard Error: 288 - .saturating_add(Weight::from_parts(51_621, 0).saturating_mul(l.into())) + // Minimum execution time: 47_908_000 picoseconds. + Weight::from_parts(50_096_676, 51487) + // Standard Error: 208 + .saturating_add(Weight::from_parts(41_318, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Nis Queues (r:1 w:1) - /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) - /// Storage: Nis QueueTotals (r:1 w:1) - /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) + /// Storage: `Nis::Queues` (r:1 w:1) + /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Nis::QueueTotals` (r:1 w:1) + /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) fn place_bid_max() -> Weight { // Proof Size summary in bytes: // Measured: `54178` // Estimated: `51487` - // Minimum execution time: 119_696_000 picoseconds. - Weight::from_parts(121_838_000, 51487) + // Minimum execution time: 100_836_000 picoseconds. 
+ Weight::from_parts(102_497_000, 51487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Nis Queues (r:1 w:1) - /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) - /// Storage: Nis QueueTotals (r:1 w:1) - /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) + /// Storage: `Nis::Queues` (r:1 w:1) + /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Nis::QueueTotals` (r:1 w:1) + /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) /// The range of component `l` is `[1, 1000]`. fn retract_bid(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6176 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 50_843_000 picoseconds. - Weight::from_parts(54_237_365, 51487) - // Standard Error: 243 - .saturating_add(Weight::from_parts(67_732, 0).saturating_mul(l.into())) + // Minimum execution time: 45_830_000 picoseconds. + Weight::from_parts(46_667_676, 51487) + // Standard Error: 130 + .saturating_add(Weight::from_parts(33_007, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Nis Summary (r:1 w:0) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Nis::Summary` (r:1 w:0) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn fund_deficit() -> Weight { // Proof Size summary in bytes: // Measured: `191` // Estimated: `3593` - // Minimum execution time: 40_752_000 picoseconds. - Weight::from_parts(41_899_000, 3593) + // Minimum execution time: 30_440_000 picoseconds. 
+ Weight::from_parts(31_240_000, 3593) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn communify() -> Weight { // Proof Size summary in bytes: // Measured: `668` // Estimated: `3675` - // Minimum execution time: 79_779_000 picoseconds. - Weight::from_parts(82_478_000, 3675) + // Minimum execution time: 71_017_000 picoseconds. 
+ Weight::from_parts(72_504_000, 3675) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn privatize() -> Weight { // Proof Size summary in bytes: // Measured: `829` // Estimated: `3675` - // Minimum execution time: 99_588_000 picoseconds. - Weight::from_parts(102_340_000, 3675) + // Minimum execution time: 89_138_000 picoseconds. 
+ Weight::from_parts(91_290_000, 3675) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn thaw_private() -> Weight { // Proof Size summary in bytes: // Measured: `354` - // Estimated: `3593` - // Minimum execution time: 53_094_000 picoseconds. - Weight::from_parts(54_543_000, 3593) + // Estimated: `3658` + // Minimum execution time: 47_917_000 picoseconds. + Weight::from_parts(49_121_000, 3658) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn thaw_communal() -> Weight { // Proof Size summary in bytes: // Measured: `773` // Estimated: `3675` - // Minimum execution time: 107_248_000 picoseconds. - Weight::from_parts(109_923_000, 3675) + // Minimum execution time: 91_320_000 picoseconds. 
+ Weight::from_parts(93_080_000, 3675) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Nis QueueTotals (r:1 w:1) - /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Nis::QueueTotals` (r:1 w:1) + /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) fn process_queues() -> Weight { // Proof Size summary in bytes: // Measured: `6624` // Estimated: `7487` - // Minimum execution time: 27_169_000 picoseconds. - Weight::from_parts(29_201_000, 7487) + // Minimum execution time: 20_117_000 picoseconds. + Weight::from_parts(20_829_000, 7487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Nis Queues (r:1 w:1) - /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) + /// Storage: `Nis::Queues` (r:1 w:1) + /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) fn process_queue() -> Weight { // Proof Size summary in bytes: // Measured: `42` // Estimated: `51487` - // Minimum execution time: 4_540_000 picoseconds. - Weight::from_parts(4_699_000, 51487) + // Minimum execution time: 4_460_000 picoseconds. + Weight::from_parts(4_797_000, 51487) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nis Receipts (r:0 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:0 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) fn process_bid() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_085_000 picoseconds. - Weight::from_parts(7_336_000, 0) + // Minimum execution time: 4_609_000 picoseconds. + Weight::from_parts(4_834_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs index 074d59931ade733a3199e227a99c6dff819be793..c64db9ed69262f8337976e66bc3ad313ff487fd5 100644 --- a/substrate/frame/nomination-pools/src/lib.rs +++ b/substrate/frame/nomination-pools/src/lib.rs @@ -1576,7 +1576,7 @@ pub mod pallet { use frame_system::{ensure_signed, pallet_prelude::*}; use sp_runtime::Perbill; - /// The current storage version. + /// The in-code storage version. 
const STORAGE_VERSION: StorageVersion = StorageVersion::new(8); #[pallet::pallet] diff --git a/substrate/frame/nomination-pools/src/migration.rs b/substrate/frame/nomination-pools/src/migration.rs index 6887fcfa7ecad36172c17e36d55713df27865482..14410339f59a4dc809187feb6d289b16ec52f79c 100644 --- a/substrate/frame/nomination-pools/src/migration.rs +++ b/substrate/frame/nomination-pools/src/migration.rs @@ -342,25 +342,25 @@ pub mod v5 { pub struct MigrateToV5<T>(sp_std::marker::PhantomData<T>); impl<T: Config> OnRuntimeUpgrade for MigrateToV5<T> { fn on_runtime_upgrade() -> Weight { - let current = Pallet::<T>::current_storage_version(); + let in_code = Pallet::<T>::in_code_storage_version(); let onchain = Pallet::<T>::on_chain_storage_version(); log!( info, - "Running migration with current storage version {:?} / onchain {:?}", - current, + "Running migration with in-code storage version {:?} / onchain {:?}", + in_code, onchain ); - if current == 5 && onchain == 4 { + if in_code == 5 && onchain == 4 { let mut translated = 0u64; RewardPools::<T>::translate::<OldRewardPool<T>, _>(|_id, old_value| { translated.saturating_inc(); Some(old_value.migrate_to_v5()) }); - current.put::<Pallet<T>>(); - log!(info, "Upgraded {} pools, storage to version {:?}", translated, current); + in_code.put::<Pallet<T>>(); + log!(info, "Upgraded {} pools, storage to version {:?}", translated, in_code); // reads: translated + onchain version. // writes: translated + current.put. @@ -498,12 +498,12 @@ pub mod v4 { #[allow(deprecated)] impl<T: Config, U: Get<Perbill>> OnRuntimeUpgrade for MigrateToV4<T, U> { fn on_runtime_upgrade() -> Weight { - let current = Pallet::<T>::current_storage_version(); + let current = Pallet::<T>::in_code_storage_version(); let onchain = Pallet::<T>::on_chain_storage_version(); log!( info, - "Running migration with current storage version {:?} / onchain {:?}", + "Running migration with in-code storage version {:?} / onchain {:?}", current, onchain ); @@ -579,13 +579,13 @@ pub mod v3 { pub struct MigrateToV3<T>(sp_std::marker::PhantomData<T>); impl<T: Config> OnRuntimeUpgrade for MigrateToV3<T> { fn on_runtime_upgrade() -> Weight { - let current = Pallet::<T>::current_storage_version(); + let current = Pallet::<T>::in_code_storage_version(); let onchain = Pallet::<T>::on_chain_storage_version(); if onchain == 2 { log!( info, - "Running migration with current storage version {:?} / onchain {:?}", + "Running migration with in-code storage version {:?} / onchain {:?}", current, onchain ); @@ -859,12 +859,12 @@ pub mod v2 { impl<T: Config> OnRuntimeUpgrade for MigrateToV2<T> { fn on_runtime_upgrade() -> Weight { - let current = Pallet::<T>::current_storage_version(); + let current = Pallet::<T>::in_code_storage_version(); let onchain = Pallet::<T>::on_chain_storage_version(); log!( info, - "Running migration with current storage version {:?} / onchain {:?}", + "Running migration with in-code storage version {:?} / onchain {:?}", current, onchain ); @@ -976,12 +976,12 @@ pub mod v1 { pub struct MigrateToV1<T>(sp_std::marker::PhantomData<T>); impl<T: Config> OnRuntimeUpgrade for MigrateToV1<T> { fn on_runtime_upgrade() -> Weight { - let current = Pallet::<T>::current_storage_version(); + let current = Pallet::<T>::in_code_storage_version(); let onchain = Pallet::<T>::on_chain_storage_version(); log!( info, - "Running migration with current storage version {:?} / onchain {:?}", + "Running migration with in-code storage version {:?} / onchain {:?}", current, onchain ); diff --git a/substrate/frame/nomination-pools/src/weights.rs b/substrate/frame/nomination-pools/src/weights.rs index 047a17c3f9a278ac9564562edf318f13ecc2f7ca..2749af7937febdf94fb38769e9dfb594abe40831 100644 ---
a/substrate/frame/nomination-pools/src/weights.rs +++ b/substrate/frame/nomination-pools/src/weights.rs @@ -17,26 +17,28 @@ //! Autogenerated weights for `pallet_nomination_pools` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_nomination_pools +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_nomination_pools -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/nomination-pools/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -112,8 +114,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3425` // Estimated: `8877` - // Minimum execution time: 184_295_000 picoseconds. - Weight::from_parts(188_860_000, 8877) + // Minimum execution time: 181_861_000 picoseconds. + Weight::from_parts(186_375_000, 8877) .saturating_add(T::DbWeight::get().reads(20_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } @@ -145,8 +147,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3435` // Estimated: `8877` - // Minimum execution time: 188_777_000 picoseconds. - Weight::from_parts(192_646_000, 8877) + // Minimum execution time: 182_273_000 picoseconds. + Weight::from_parts(186_635_000, 8877) .saturating_add(T::DbWeight::get().reads(17_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } @@ -180,8 +182,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3500` // Estimated: `8877` - // Minimum execution time: 221_728_000 picoseconds. - Weight::from_parts(227_569_000, 8877) + // Minimum execution time: 217_878_000 picoseconds. + Weight::from_parts(221_493_000, 8877) .saturating_add(T::DbWeight::get().reads(18_u64)) .saturating_add(T::DbWeight::get().writes(14_u64)) } @@ -201,8 +203,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1172` // Estimated: `3719` - // Minimum execution time: 75_310_000 picoseconds. - Weight::from_parts(77_709_000, 3719) + // Minimum execution time: 74_509_000 picoseconds. + Weight::from_parts(76_683_000, 3719) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -242,8 +244,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3622` // Estimated: `27847` - // Minimum execution time: 170_656_000 picoseconds. - Weight::from_parts(174_950_000, 27847) + // Minimum execution time: 166_424_000 picoseconds. 
+ Weight::from_parts(169_698_000, 27847) .saturating_add(T::DbWeight::get().reads(20_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } @@ -259,18 +261,20 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn pool_withdraw_unbonded(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1817` + // Measured: `1848` // Estimated: `4764` - // Minimum execution time: 68_866_000 picoseconds. - Weight::from_parts(72_312_887, 4764) - // Standard Error: 1_635 - .saturating_add(Weight::from_parts(41_679, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(7_u64)) + // Minimum execution time: 66_110_000 picoseconds. + Weight::from_parts(68_620_141, 4764) + // Standard Error: 1_379 + .saturating_add(Weight::from_parts(54_961, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) @@ -291,6 +295,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) @@ -300,13 +306,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2207` + // Measured: `2238` // Estimated: `27847` - // Minimum execution time: 131_383_000 picoseconds. - Weight::from_parts(136_595_971, 27847) - // Standard Error: 2_715 - .saturating_add(Weight::from_parts(52_351, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) + // Minimum execution time: 125_669_000 picoseconds. 
+ Weight::from_parts(130_907_641, 27847) + // Standard Error: 2_490 + .saturating_add(Weight::from_parts(75_219, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(9_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) @@ -333,12 +339,12 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:0) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:1) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:1) - /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForReversePoolIdLookup` (r:1 w:1) /// Proof: `NominationPools::CounterForReversePoolIdLookup` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) @@ -360,8 +366,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2525` // Estimated: `27847` - // Minimum execution time: 233_314_000 picoseconds. - Weight::from_parts(241_694_316, 27847) + // Minimum execution time: 225_700_000 picoseconds. + Weight::from_parts(234_390_990, 27847) .saturating_add(T::DbWeight::get().reads(24_u64)) .saturating_add(T::DbWeight::get().writes(20_u64)) } @@ -413,8 +419,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1169` // Estimated: `8538` - // Minimum execution time: 171_465_000 picoseconds. - Weight::from_parts(176_478_000, 8538) + // Minimum execution time: 167_171_000 picoseconds. + Weight::from_parts(170_531_000, 8538) .saturating_add(T::DbWeight::get().reads(23_u64)) .saturating_add(T::DbWeight::get().writes(17_u64)) } @@ -447,10 +453,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1808` // Estimated: `4556 + n * (2520 ±0)` - // Minimum execution time: 63_588_000 picoseconds. - Weight::from_parts(64_930_584, 4556) - // Standard Error: 9_167 - .saturating_add(Weight::from_parts(1_595_779, 0).saturating_mul(n.into())) + // Minimum execution time: 63_785_000 picoseconds. + Weight::from_parts(64_592_302, 4556) + // Standard Error: 9_416 + .saturating_add(Weight::from_parts(1_524_398, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(5_u64)) @@ -466,8 +472,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1434` // Estimated: `4556` - // Minimum execution time: 32_899_000 picoseconds. - Weight::from_parts(33_955_000, 4556) + // Minimum execution time: 32_096_000 picoseconds. 
+ Weight::from_parts(33_533_000, 4556) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -482,10 +488,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3735` - // Minimum execution time: 13_778_000 picoseconds. - Weight::from_parts(14_770_006, 3735) - // Standard Error: 151 - .saturating_add(Weight::from_parts(1_900, 0).saturating_mul(n.into())) + // Minimum execution time: 13_914_000 picoseconds. + Weight::from_parts(14_800_402, 3735) + // Standard Error: 146 + .saturating_add(Weight::from_parts(940, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -505,8 +511,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_550_000 picoseconds. - Weight::from_parts(4_935_000, 0) + // Minimum execution time: 4_373_000 picoseconds. + Weight::from_parts(4_592_000, 0) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) @@ -515,8 +521,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_759_000 picoseconds. - Weight::from_parts(17_346_000, 3719) + // Minimum execution time: 16_662_000 picoseconds. + Weight::from_parts(17_531_000, 3719) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -542,8 +548,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1971` // Estimated: `4556` - // Minimum execution time: 61_970_000 picoseconds. - Weight::from_parts(63_738_000, 4556) + // Minimum execution time: 61_348_000 picoseconds. + Weight::from_parts(63_712_000, 4556) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -559,8 +565,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `804` // Estimated: `3719` - // Minimum execution time: 31_950_000 picoseconds. - Weight::from_parts(33_190_000, 3719) + // Minimum execution time: 32_232_000 picoseconds. + Weight::from_parts(33_433_000, 3719) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -572,8 +578,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `572` // Estimated: `3719` - // Minimum execution time: 16_807_000 picoseconds. - Weight::from_parts(17_733_000, 3719) + // Minimum execution time: 16_774_000 picoseconds. + Weight::from_parts(17_671_000, 3719) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -583,8 +589,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_710_000 picoseconds. - Weight::from_parts(17_563_000, 3719) + // Minimum execution time: 16_724_000 picoseconds. + Weight::from_parts(17_181_000, 3719) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -594,8 +600,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_493_000 picoseconds. - Weight::from_parts(17_022_000, 3719) + // Minimum execution time: 16_362_000 picoseconds. 
+ Weight::from_parts(17_135_000, 3719) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -607,8 +613,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `542` // Estimated: `3702` - // Minimum execution time: 14_248_000 picoseconds. - Weight::from_parts(15_095_000, 3702) + // Minimum execution time: 14_125_000 picoseconds. + Weight::from_parts(14_705_000, 3702) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -624,8 +630,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1002` // Estimated: `3719` - // Minimum execution time: 61_969_000 picoseconds. - Weight::from_parts(63_965_000, 3719) + // Minimum execution time: 61_699_000 picoseconds. + Weight::from_parts(63_605_000, 3719) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -641,8 +647,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `901` // Estimated: `4764` - // Minimum execution time: 65_462_000 picoseconds. - Weight::from_parts(67_250_000, 4764) + // Minimum execution time: 64_930_000 picoseconds. + Weight::from_parts(66_068_000, 4764) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -686,8 +692,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3425` // Estimated: `8877` - // Minimum execution time: 184_295_000 picoseconds. - Weight::from_parts(188_860_000, 8877) + // Minimum execution time: 181_861_000 picoseconds. + Weight::from_parts(186_375_000, 8877) .saturating_add(RocksDbWeight::get().reads(20_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } @@ -719,8 +725,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3435` // Estimated: `8877` - // Minimum execution time: 188_777_000 picoseconds. - Weight::from_parts(192_646_000, 8877) + // Minimum execution time: 182_273_000 picoseconds. + Weight::from_parts(186_635_000, 8877) .saturating_add(RocksDbWeight::get().reads(17_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } @@ -754,8 +760,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3500` // Estimated: `8877` - // Minimum execution time: 221_728_000 picoseconds. - Weight::from_parts(227_569_000, 8877) + // Minimum execution time: 217_878_000 picoseconds. + Weight::from_parts(221_493_000, 8877) .saturating_add(RocksDbWeight::get().reads(18_u64)) .saturating_add(RocksDbWeight::get().writes(14_u64)) } @@ -775,8 +781,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1172` // Estimated: `3719` - // Minimum execution time: 75_310_000 picoseconds. - Weight::from_parts(77_709_000, 3719) + // Minimum execution time: 74_509_000 picoseconds. + Weight::from_parts(76_683_000, 3719) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -816,8 +822,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3622` // Estimated: `27847` - // Minimum execution time: 170_656_000 picoseconds. - Weight::from_parts(174_950_000, 27847) + // Minimum execution time: 166_424_000 picoseconds. 
+ Weight::from_parts(169_698_000, 27847) .saturating_add(RocksDbWeight::get().reads(20_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } @@ -833,18 +839,20 @@ impl WeightInfo for () { /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn pool_withdraw_unbonded(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1817` + // Measured: `1848` // Estimated: `4764` - // Minimum execution time: 68_866_000 picoseconds. - Weight::from_parts(72_312_887, 4764) - // Standard Error: 1_635 - .saturating_add(Weight::from_parts(41_679, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(7_u64)) + // Minimum execution time: 66_110_000 picoseconds. + Weight::from_parts(68_620_141, 4764) + // Standard Error: 1_379 + .saturating_add(Weight::from_parts(54_961, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) @@ -865,6 +873,8 @@ impl WeightInfo for () { /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) @@ -874,13 +884,13 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2207` + // Measured: `2238` // Estimated: `27847` - // Minimum execution time: 131_383_000 picoseconds. - Weight::from_parts(136_595_971, 27847) - // Standard Error: 2_715 - .saturating_add(Weight::from_parts(52_351, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Minimum execution time: 125_669_000 picoseconds. 
+ Weight::from_parts(130_907_641, 27847) + // Standard Error: 2_490 + .saturating_add(Weight::from_parts(75_219, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(9_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) @@ -907,12 +917,12 @@ impl WeightInfo for () { /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:0) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:1) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:1) - /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForReversePoolIdLookup` (r:1 w:1) /// Proof: `NominationPools::CounterForReversePoolIdLookup` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) @@ -934,8 +944,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2525` // Estimated: `27847` - // Minimum execution time: 233_314_000 picoseconds. - Weight::from_parts(241_694_316, 27847) + // Minimum execution time: 225_700_000 picoseconds. + Weight::from_parts(234_390_990, 27847) .saturating_add(RocksDbWeight::get().reads(24_u64)) .saturating_add(RocksDbWeight::get().writes(20_u64)) } @@ -987,8 +997,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1169` // Estimated: `8538` - // Minimum execution time: 171_465_000 picoseconds. - Weight::from_parts(176_478_000, 8538) + // Minimum execution time: 167_171_000 picoseconds. + Weight::from_parts(170_531_000, 8538) .saturating_add(RocksDbWeight::get().reads(23_u64)) .saturating_add(RocksDbWeight::get().writes(17_u64)) } @@ -1021,10 +1031,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1808` // Estimated: `4556 + n * (2520 ±0)` - // Minimum execution time: 63_588_000 picoseconds. - Weight::from_parts(64_930_584, 4556) - // Standard Error: 9_167 - .saturating_add(Weight::from_parts(1_595_779, 0).saturating_mul(n.into())) + // Minimum execution time: 63_785_000 picoseconds. + Weight::from_parts(64_592_302, 4556) + // Standard Error: 9_416 + .saturating_add(Weight::from_parts(1_524_398, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(5_u64)) @@ -1040,8 +1050,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1434` // Estimated: `4556` - // Minimum execution time: 32_899_000 picoseconds. - Weight::from_parts(33_955_000, 4556) + // Minimum execution time: 32_096_000 picoseconds. 
+ Weight::from_parts(33_533_000, 4556) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1056,10 +1066,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3735` - // Minimum execution time: 13_778_000 picoseconds. - Weight::from_parts(14_770_006, 3735) - // Standard Error: 151 - .saturating_add(Weight::from_parts(1_900, 0).saturating_mul(n.into())) + // Minimum execution time: 13_914_000 picoseconds. + Weight::from_parts(14_800_402, 3735) + // Standard Error: 146 + .saturating_add(Weight::from_parts(940, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1079,8 +1089,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_550_000 picoseconds. - Weight::from_parts(4_935_000, 0) + // Minimum execution time: 4_373_000 picoseconds. + Weight::from_parts(4_592_000, 0) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) @@ -1089,8 +1099,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_759_000 picoseconds. - Weight::from_parts(17_346_000, 3719) + // Minimum execution time: 16_662_000 picoseconds. + Weight::from_parts(17_531_000, 3719) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1116,8 +1126,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1971` // Estimated: `4556` - // Minimum execution time: 61_970_000 picoseconds. - Weight::from_parts(63_738_000, 4556) + // Minimum execution time: 61_348_000 picoseconds. + Weight::from_parts(63_712_000, 4556) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -1133,8 +1143,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `804` // Estimated: `3719` - // Minimum execution time: 31_950_000 picoseconds. - Weight::from_parts(33_190_000, 3719) + // Minimum execution time: 32_232_000 picoseconds. + Weight::from_parts(33_433_000, 3719) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1146,8 +1156,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `572` // Estimated: `3719` - // Minimum execution time: 16_807_000 picoseconds. - Weight::from_parts(17_733_000, 3719) + // Minimum execution time: 16_774_000 picoseconds. + Weight::from_parts(17_671_000, 3719) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1157,8 +1167,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_710_000 picoseconds. - Weight::from_parts(17_563_000, 3719) + // Minimum execution time: 16_724_000 picoseconds. + Weight::from_parts(17_181_000, 3719) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1168,8 +1178,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_493_000 picoseconds. - Weight::from_parts(17_022_000, 3719) + // Minimum execution time: 16_362_000 picoseconds. 
+ Weight::from_parts(17_135_000, 3719) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1181,8 +1191,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `542` // Estimated: `3702` - // Minimum execution time: 14_248_000 picoseconds. - Weight::from_parts(15_095_000, 3702) + // Minimum execution time: 14_125_000 picoseconds. + Weight::from_parts(14_705_000, 3702) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1198,8 +1208,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1002` // Estimated: `3719` - // Minimum execution time: 61_969_000 picoseconds. - Weight::from_parts(63_965_000, 3719) + // Minimum execution time: 61_699_000 picoseconds. + Weight::from_parts(63_605_000, 3719) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1215,8 +1225,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `901` // Estimated: `4764` - // Minimum execution time: 65_462_000 picoseconds. - Weight::from_parts(67_250_000, 4764) + // Minimum execution time: 64_930_000 picoseconds. + Weight::from_parts(66_068_000, 4764) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index 01ad8d64f100672dc36929d28af22e41f8a88c60..b3260f0841f06490246a0c0f079864d5f1224a0e 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -148,7 +148,7 @@ parameter_types! { pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); } -pub type Extrinsic = sp_runtime::testing::TestXt; +pub type Extrinsic = sp_runtime::generic::UncheckedExtrinsic; pub struct OnChainSeqPhragmen; impl onchain::Config for OnChainSeqPhragmen { diff --git a/substrate/frame/parameters/src/weights.rs b/substrate/frame/parameters/src/weights.rs index 6746960b1b711995b952f6b5784e71debc41a688..340eb9e31b77c507deb1d81794980f856d01971d 100644 --- a/substrate/frame/parameters/src/weights.rs +++ b/substrate/frame/parameters/src/weights.rs @@ -18,25 +18,27 @@ //! Autogenerated weights for `pallet_parameters` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_parameters +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_parameters -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/parameters/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -55,22 +57,30 @@ pub trait WeightInfo { /// Weights for `pallet_parameters` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { + /// Storage: `Parameters::Parameters` (r:1 w:1) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) fn set_parameter() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) + // Measured: `3` + // Estimated: `3501` + // Minimum execution time: 8_400_000 picoseconds. + Weight::from_parts(8_682_000, 3501) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } } // For backwards compatibility and tests. impl WeightInfo for () { + /// Storage: `Parameters::Parameters` (r:1 w:1) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) fn set_parameter() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) + // Measured: `3` + // Estimated: `3501` + // Minimum execution time: 8_400_000 picoseconds. + Weight::from_parts(8_682_000, 3501) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/frame/preimage/src/lib.rs b/substrate/frame/preimage/src/lib.rs index e344bdfe2d8feff5a25ed686d7301f462fa41189..4e474685166631ba41eed644d3754a332a290663 100644 --- a/substrate/frame/preimage/src/lib.rs +++ b/substrate/frame/preimage/src/lib.rs @@ -102,7 +102,7 @@ pub const MAX_HASH_UPGRADE_BULK_COUNT: u32 = 1024; pub mod pallet { use super::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::config] diff --git a/substrate/frame/preimage/src/weights.rs b/substrate/frame/preimage/src/weights.rs index c11ab74c1e551a0fcdd6e239a657ab5df0809aad..6167cd5302918ab0da7f1a243ccfeb4743f23540 100644 --- a/substrate/frame/preimage/src/weights.rs +++ b/substrate/frame/preimage/src/weights.rs @@ -17,26 +17,28 @@ //! Autogenerated weights for `pallet_preimage` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-mia4uyug-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_preimage +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_preimage -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/preimage/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -70,200 +72,209 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `3556` - // Minimum execution time: 15_936_000 picoseconds. - Weight::from_parts(16_271_000, 3556) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_916, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + // Measured: `112` + // Estimated: `6012` + // Minimum execution time: 48_893_000 picoseconds. + Weight::from_parts(44_072_327, 6012) + // Standard Error: 2 + .saturating_add(Weight::from_parts(1_684, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_requested_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 16_468_000 picoseconds. 
- Weight::from_parts(17_031_000, 3556) + // Minimum execution time: 15_675_000 picoseconds. + Weight::from_parts(4_564_145, 3556) // Standard Error: 2 - .saturating_add(Weight::from_parts(1_948, 0).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(1_678, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_no_deposit_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 16_342_000 picoseconds. - Weight::from_parts(16_535_000, 3556) + // Minimum execution time: 14_959_000 picoseconds. + Weight::from_parts(15_335_000, 3556) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_906, 0).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(1_687, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `172` - // Estimated: `3556` - // Minimum execution time: 31_047_000 picoseconds. - Weight::from_parts(34_099_000, 3556) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + // Measured: `311` + // Estimated: `3658` + // Minimum execution time: 47_378_000 picoseconds. 
+ Weight::from_parts(48_776_000, 3658) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `144` + // Measured: `211` // Estimated: `3556` - // Minimum execution time: 32_559_000 picoseconds. - Weight::from_parts(36_677_000, 3556) + // Minimum execution time: 20_939_000 picoseconds. + Weight::from_parts(21_577_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `172` + // Measured: `255` // Estimated: `3556` - // Minimum execution time: 27_887_000 picoseconds. - Weight::from_parts(30_303_000, 3556) + // Minimum execution time: 17_945_000 picoseconds. + Weight::from_parts(18_448_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `144` + // Measured: `211` // Estimated: `3556` - // Minimum execution time: 17_256_000 picoseconds. - Weight::from_parts(19_481_000, 3556) + // Minimum execution time: 12_132_000 picoseconds. 
+ Weight::from_parts(12_710_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_unnoted_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `42` + // Measured: `109` // Estimated: `3556` - // Minimum execution time: 22_344_000 picoseconds. - Weight::from_parts(23_868_000, 3556) + // Minimum execution time: 13_014_000 picoseconds. + Weight::from_parts(13_726_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_requested_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 10_542_000 picoseconds. - Weight::from_parts(11_571_000, 3556) + // Minimum execution time: 9_785_000 picoseconds. + Weight::from_parts(10_266_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unrequest_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `144` + // Measured: `211` // Estimated: `3556` - // Minimum execution time: 29_054_000 picoseconds. - Weight::from_parts(32_996_000, 3556) + // Minimum execution time: 18_764_000 picoseconds. 
+ Weight::from_parts(19_635_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn unrequest_unnoted_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 10_775_000 picoseconds. - Weight::from_parts(11_937_000, 3556) + // Minimum execution time: 9_624_000 picoseconds. + Weight::from_parts(10_044_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn unrequest_multi_referenced_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 10_696_000 picoseconds. - Weight::from_parts(11_717_000, 3556) + // Minimum execution time: 9_432_000 picoseconds. + Weight::from_parts(9_991_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: `Preimage::StatusFor` (r:1024 w:1024) + /// Storage: `Preimage::StatusFor` (r:1023 w:1023) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) + /// Storage: `System::Account` (r:1023 w:1023) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:0 w:1024) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 1024]`. + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1023 w:1023) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:0 w:1023) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 1024]`. fn ensure_updated(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `193 + n * (91 ±0)` - // Estimated: `3593 + n * (2566 ±0)` - // Minimum execution time: 2_452_000 picoseconds. 
- Weight::from_parts(2_641_000, 3593) - // Standard Error: 19_797 - .saturating_add(Weight::from_parts(15_620_946, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2566).saturating_mul(n.into())) + // Measured: `0 + n * (227 ±0)` + // Estimated: `6012 + n * (2668 ±0)` + // Minimum execution time: 54_056_000 picoseconds. + Weight::from_parts(54_912_000, 6012) + // Standard Error: 42_469 + .saturating_add(Weight::from_parts(50_710_258, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2668).saturating_mul(n.into())) } } @@ -272,199 +283,208 @@ impl WeightInfo for () { /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `3556` - // Minimum execution time: 15_936_000 picoseconds. - Weight::from_parts(16_271_000, 3556) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_916, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + // Measured: `112` + // Estimated: `6012` + // Minimum execution time: 48_893_000 picoseconds. + Weight::from_parts(44_072_327, 6012) + // Standard Error: 2 + .saturating_add(Weight::from_parts(1_684, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. 
fn note_requested_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 16_468_000 picoseconds. - Weight::from_parts(17_031_000, 3556) + // Minimum execution time: 15_675_000 picoseconds. + Weight::from_parts(4_564_145, 3556) // Standard Error: 2 - .saturating_add(Weight::from_parts(1_948, 0).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(1_678, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_no_deposit_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 16_342_000 picoseconds. - Weight::from_parts(16_535_000, 3556) + // Minimum execution time: 14_959_000 picoseconds. + Weight::from_parts(15_335_000, 3556) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_906, 0).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(1_687, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `172` - // Estimated: `3556` - // Minimum execution time: 31_047_000 picoseconds. - Weight::from_parts(34_099_000, 3556) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + // Measured: `311` + // Estimated: `3658` + // Minimum execution time: 47_378_000 picoseconds. 
+ Weight::from_parts(48_776_000, 3658) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `144` + // Measured: `211` // Estimated: `3556` - // Minimum execution time: 32_559_000 picoseconds. - Weight::from_parts(36_677_000, 3556) + // Minimum execution time: 20_939_000 picoseconds. + Weight::from_parts(21_577_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `172` + // Measured: `255` // Estimated: `3556` - // Minimum execution time: 27_887_000 picoseconds. - Weight::from_parts(30_303_000, 3556) + // Minimum execution time: 17_945_000 picoseconds. + Weight::from_parts(18_448_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `144` + // Measured: `211` // Estimated: `3556` - // Minimum execution time: 17_256_000 picoseconds. - Weight::from_parts(19_481_000, 3556) + // Minimum execution time: 12_132_000 picoseconds. 
+ Weight::from_parts(12_710_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_unnoted_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `42` + // Measured: `109` // Estimated: `3556` - // Minimum execution time: 22_344_000 picoseconds. - Weight::from_parts(23_868_000, 3556) + // Minimum execution time: 13_014_000 picoseconds. + Weight::from_parts(13_726_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_requested_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 10_542_000 picoseconds. - Weight::from_parts(11_571_000, 3556) + // Minimum execution time: 9_785_000 picoseconds. + Weight::from_parts(10_266_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unrequest_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `144` + // Measured: `211` // Estimated: `3556` - // Minimum execution time: 29_054_000 picoseconds. - Weight::from_parts(32_996_000, 3556) + // Minimum execution time: 18_764_000 picoseconds. 
+ Weight::from_parts(19_635_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn unrequest_unnoted_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 10_775_000 picoseconds. - Weight::from_parts(11_937_000, 3556) + // Minimum execution time: 9_624_000 picoseconds. + Weight::from_parts(10_044_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn unrequest_multi_referenced_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 10_696_000 picoseconds. - Weight::from_parts(11_717_000, 3556) + // Minimum execution time: 9_432_000 picoseconds. + Weight::from_parts(9_991_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Preimage::StatusFor` (r:1024 w:1024) + /// Storage: `Preimage::StatusFor` (r:1023 w:1023) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) + /// Storage: `System::Account` (r:1023 w:1023) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:0 w:1024) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 1024]`. + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1023 w:1023) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:0 w:1023) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 1024]`. fn ensure_updated(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `193 + n * (91 ±0)` - // Estimated: `3593 + n * (2566 ±0)` - // Minimum execution time: 2_452_000 picoseconds. 
- Weight::from_parts(2_641_000, 3593) - // Standard Error: 19_797 - .saturating_add(Weight::from_parts(15_620_946, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2566).saturating_mul(n.into())) + // Measured: `0 + n * (227 ±0)` + // Estimated: `6012 + n * (2668 ±0)` + // Minimum execution time: 54_056_000 picoseconds. + Weight::from_parts(54_912_000, 6012) + // Standard Error: 42_469 + .saturating_add(Weight::from_parts(50_710_258, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) + .saturating_add(RocksDbWeight::get().writes((4_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2668).saturating_mul(n.into())) } } diff --git a/substrate/frame/proxy/src/weights.rs b/substrate/frame/proxy/src/weights.rs index f30fe73d27ae665fca89761585226d906fd5112c..3c37c91d500e640d7c36e9b056f193e48162630b 100644 --- a/substrate/frame/proxy/src/weights.rs +++ b/substrate/frame/proxy/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_proxy +//! Autogenerated weights for `pallet_proxy` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/proxy/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/proxy/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_proxy. +/// Weight functions needed for `pallet_proxy`. pub trait WeightInfo { fn proxy(p: u32, ) -> Weight; fn proxy_announced(a: u32, p: u32, ) -> Weight; @@ -64,336 +63,352 @@ pub trait WeightInfo { fn kill_pure(p: u32, ) -> Weight; } -/// Weights for pallet_proxy using the Substrate node and recommended hardware. +/// Weights for `pallet_proxy` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Proxy Proxies (r:1 w:0) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `161 + p * (37 ±0)` + // Measured: `306 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 15_182_000 picoseconds. - Weight::from_parts(15_919_146, 4706) - // Standard Error: 1_586 - .saturating_add(Weight::from_parts(31_768, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Minimum execution time: 18_437_000 picoseconds. + Weight::from_parts(19_610_577, 4706) + // Standard Error: 2_531 + .saturating_add(Weight::from_parts(26_001, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) } - /// Storage: Proxy Proxies (r:1 w:0) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. fn proxy_announced(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `488 + a * (68 ±0) + p * (37 ±0)` + // Measured: `633 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 40_256_000 picoseconds. - Weight::from_parts(40_373_648, 5698) - // Standard Error: 3_978 - .saturating_add(Weight::from_parts(166_936, 0).saturating_mul(a.into())) - // Standard Error: 4_110 - .saturating_add(Weight::from_parts(54_329, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Minimum execution time: 40_426_000 picoseconds. 
+ Weight::from_parts(40_200_295, 5698) + // Standard Error: 2_922 + .saturating_add(Weight::from_parts(161_885, 0).saturating_mul(a.into())) + // Standard Error: 3_019 + .saturating_add(Weight::from_parts(69_710, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. fn remove_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `403 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 25_040_000 picoseconds. - Weight::from_parts(25_112_188, 5698) - // Standard Error: 2_143 - .saturating_add(Weight::from_parts(189_027, 0).saturating_mul(a.into())) - // Standard Error: 2_214 - .saturating_add(Weight::from_parts(26_683, 0).saturating_mul(p.into())) + // Minimum execution time: 21_905_000 picoseconds. + Weight::from_parts(22_717_430, 5698) + // Standard Error: 2_004 + .saturating_add(Weight::from_parts(153_390, 0).saturating_mul(a.into())) + // Standard Error: 2_071 + .saturating_add(Weight::from_parts(5_676, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. fn reject_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `403 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 24_884_000 picoseconds. - Weight::from_parts(25_359_291, 5698) - // Standard Error: 2_019 - .saturating_add(Weight::from_parts(181_470, 0).saturating_mul(a.into())) - // Standard Error: 2_086 - .saturating_add(Weight::from_parts(17_725, 0).saturating_mul(p.into())) + // Minimum execution time: 21_974_000 picoseconds. 
+ Weight::from_parts(22_484_324, 5698) + // Standard Error: 1_846 + .saturating_add(Weight::from_parts(153_904, 0).saturating_mul(a.into())) + // Standard Error: 1_907 + .saturating_add(Weight::from_parts(9_616, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Proxy Proxies (r:1 w:0) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. fn announce(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `420 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 35_039_000 picoseconds. - Weight::from_parts(36_727_868, 5698) - // Standard Error: 4_463 - .saturating_add(Weight::from_parts(167_060, 0).saturating_mul(a.into())) - // Standard Error: 4_611 - .saturating_add(Weight::from_parts(59_836, 0).saturating_mul(p.into())) + // Minimum execution time: 30_454_000 picoseconds. + Weight::from_parts(32_128_158, 5698) + // Standard Error: 3_778 + .saturating_add(Weight::from_parts(137_366, 0).saturating_mul(a.into())) + // Standard Error: 3_904 + .saturating_add(Weight::from_parts(53_040, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn add_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `161 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 25_697_000 picoseconds. - Weight::from_parts(26_611_090, 4706) - // Standard Error: 2_306 - .saturating_add(Weight::from_parts(85_165, 0).saturating_mul(p.into())) + // Minimum execution time: 21_391_000 picoseconds. + Weight::from_parts(22_202_614, 4706) + // Standard Error: 1_750 + .saturating_add(Weight::from_parts(49_639, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. 
fn remove_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `161 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 25_638_000 picoseconds. - Weight::from_parts(26_904_510, 4706) - // Standard Error: 2_669 - .saturating_add(Weight::from_parts(61_668, 0).saturating_mul(p.into())) + // Minimum execution time: 21_375_000 picoseconds. + Weight::from_parts(22_392_601, 4706) + // Standard Error: 2_415 + .saturating_add(Weight::from_parts(40_345, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn remove_proxies(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `161 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 22_737_000 picoseconds. - Weight::from_parts(23_618_441, 4706) - // Standard Error: 1_729 - .saturating_add(Weight::from_parts(44_009, 0).saturating_mul(p.into())) + // Minimum execution time: 19_833_000 picoseconds. + Weight::from_parts(20_839_747, 4706) + // Standard Error: 1_742 + .saturating_add(Weight::from_parts(40_874, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn create_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `173` // Estimated: `4706` - // Minimum execution time: 27_364_000 picoseconds. - Weight::from_parts(28_632_271, 4706) - // Standard Error: 1_613 - .saturating_add(Weight::from_parts(2_453, 0).saturating_mul(p.into())) + // Minimum execution time: 22_231_000 picoseconds. + Weight::from_parts(23_370_995, 4706) + // Standard Error: 1_521 + .saturating_add(Weight::from_parts(4_892, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 30]`. fn kill_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `198 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 23_552_000 picoseconds. - Weight::from_parts(24_874_553, 4706) - // Standard Error: 1_919 - .saturating_add(Weight::from_parts(38_799, 0).saturating_mul(p.into())) + // Minimum execution time: 20_614_000 picoseconds. + Weight::from_parts(21_845_970, 4706) + // Standard Error: 1_636 + .saturating_add(Weight::from_parts(34_480, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: Proxy Proxies (r:1 w:0) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `161 + p * (37 ±0)` + // Measured: `306 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 15_182_000 picoseconds. - Weight::from_parts(15_919_146, 4706) - // Standard Error: 1_586 - .saturating_add(Weight::from_parts(31_768, 0).saturating_mul(p.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Minimum execution time: 18_437_000 picoseconds. + Weight::from_parts(19_610_577, 4706) + // Standard Error: 2_531 + .saturating_add(Weight::from_parts(26_001, 0).saturating_mul(p.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) } - /// Storage: Proxy Proxies (r:1 w:0) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. fn proxy_announced(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `488 + a * (68 ±0) + p * (37 ±0)` + // Measured: `633 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 40_256_000 picoseconds. - Weight::from_parts(40_373_648, 5698) - // Standard Error: 3_978 - .saturating_add(Weight::from_parts(166_936, 0).saturating_mul(a.into())) - // Standard Error: 4_110 - .saturating_add(Weight::from_parts(54_329, 0).saturating_mul(p.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Minimum execution time: 40_426_000 picoseconds. 
+ Weight::from_parts(40_200_295, 5698) + // Standard Error: 2_922 + .saturating_add(Weight::from_parts(161_885, 0).saturating_mul(a.into())) + // Standard Error: 3_019 + .saturating_add(Weight::from_parts(69_710, 0).saturating_mul(p.into())) + .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. fn remove_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `403 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 25_040_000 picoseconds. - Weight::from_parts(25_112_188, 5698) - // Standard Error: 2_143 - .saturating_add(Weight::from_parts(189_027, 0).saturating_mul(a.into())) - // Standard Error: 2_214 - .saturating_add(Weight::from_parts(26_683, 0).saturating_mul(p.into())) + // Minimum execution time: 21_905_000 picoseconds. + Weight::from_parts(22_717_430, 5698) + // Standard Error: 2_004 + .saturating_add(Weight::from_parts(153_390, 0).saturating_mul(a.into())) + // Standard Error: 2_071 + .saturating_add(Weight::from_parts(5_676, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. fn reject_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `403 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 24_884_000 picoseconds. - Weight::from_parts(25_359_291, 5698) - // Standard Error: 2_019 - .saturating_add(Weight::from_parts(181_470, 0).saturating_mul(a.into())) - // Standard Error: 2_086 - .saturating_add(Weight::from_parts(17_725, 0).saturating_mul(p.into())) + // Minimum execution time: 21_974_000 picoseconds. 
+ Weight::from_parts(22_484_324, 5698) + // Standard Error: 1_846 + .saturating_add(Weight::from_parts(153_904, 0).saturating_mul(a.into())) + // Standard Error: 1_907 + .saturating_add(Weight::from_parts(9_616, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Proxy Proxies (r:1 w:0) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. fn announce(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `420 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 35_039_000 picoseconds. - Weight::from_parts(36_727_868, 5698) - // Standard Error: 4_463 - .saturating_add(Weight::from_parts(167_060, 0).saturating_mul(a.into())) - // Standard Error: 4_611 - .saturating_add(Weight::from_parts(59_836, 0).saturating_mul(p.into())) + // Minimum execution time: 30_454_000 picoseconds. + Weight::from_parts(32_128_158, 5698) + // Standard Error: 3_778 + .saturating_add(Weight::from_parts(137_366, 0).saturating_mul(a.into())) + // Standard Error: 3_904 + .saturating_add(Weight::from_parts(53_040, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn add_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `161 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 25_697_000 picoseconds. - Weight::from_parts(26_611_090, 4706) - // Standard Error: 2_306 - .saturating_add(Weight::from_parts(85_165, 0).saturating_mul(p.into())) + // Minimum execution time: 21_391_000 picoseconds. + Weight::from_parts(22_202_614, 4706) + // Standard Error: 1_750 + .saturating_add(Weight::from_parts(49_639, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. 
fn remove_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `161 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 25_638_000 picoseconds. - Weight::from_parts(26_904_510, 4706) - // Standard Error: 2_669 - .saturating_add(Weight::from_parts(61_668, 0).saturating_mul(p.into())) + // Minimum execution time: 21_375_000 picoseconds. + Weight::from_parts(22_392_601, 4706) + // Standard Error: 2_415 + .saturating_add(Weight::from_parts(40_345, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn remove_proxies(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `161 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 22_737_000 picoseconds. - Weight::from_parts(23_618_441, 4706) - // Standard Error: 1_729 - .saturating_add(Weight::from_parts(44_009, 0).saturating_mul(p.into())) + // Minimum execution time: 19_833_000 picoseconds. + Weight::from_parts(20_839_747, 4706) + // Standard Error: 1_742 + .saturating_add(Weight::from_parts(40_874, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn create_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `173` // Estimated: `4706` - // Minimum execution time: 27_364_000 picoseconds. - Weight::from_parts(28_632_271, 4706) - // Standard Error: 1_613 - .saturating_add(Weight::from_parts(2_453, 0).saturating_mul(p.into())) + // Minimum execution time: 22_231_000 picoseconds. + Weight::from_parts(23_370_995, 4706) + // Standard Error: 1_521 + .saturating_add(Weight::from_parts(4_892, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 30]`. fn kill_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `198 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 23_552_000 picoseconds. - Weight::from_parts(24_874_553, 4706) - // Standard Error: 1_919 - .saturating_add(Weight::from_parts(38_799, 0).saturating_mul(p.into())) + // Minimum execution time: 20_614_000 picoseconds. 
+ Weight::from_parts(21_845_970, 4706) + // Standard Error: 1_636 + .saturating_add(Weight::from_parts(34_480, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/ranked-collective/src/weights.rs b/substrate/frame/ranked-collective/src/weights.rs index 4ff0c3337d503fd66de4400ca40631cfcca300d9..5721858f7e2f1d53ada65c2adc689dc9bd8c5a3d 100644 --- a/substrate/frame/ranked-collective/src/weights.rs +++ b/substrate/frame/ranked-collective/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_ranked_collective +//! Autogenerated weights for `pallet_ranked_collective` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/ranked-collective/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/ranked-collective/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_ranked_collective. +/// Weight functions needed for `pallet_ranked_collective`. pub trait WeightInfo { fn add_member() -> Weight; fn remove_member(r: u32, ) -> Weight; @@ -61,283 +60,295 @@ pub trait WeightInfo { fn exchange_member() -> Weight; } -/// Weights for pallet_ranked_collective using the Substrate node and recommended hardware. +/// Weights for `pallet_ranked_collective` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight<T>(PhantomData<T>); impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IndexToId (r:0 w:1) - /// Proof: RankedCollective IndexToId (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:0 w:1) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:1) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:0 w:1) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn add_member() -> Weight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3507` - // Minimum execution time: 17_245_000 picoseconds. - Weight::from_parts(17_930_000, 3507) + // Minimum execution time: 15_389_000 picoseconds. + Weight::from_parts(15_901_000, 3507) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:11 w:11) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:11 w:11) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: RankedCollective IndexToId (r:11 w:11) - /// Proof: RankedCollective IndexToId (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:11 w:11) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:11 w:22) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:11 w:22) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 10]`. fn remove_member(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `616 + r * (281 ±0)` // Estimated: `3519 + r * (2529 ±0)` - // Minimum execution time: 29_534_000 picoseconds.
- Weight::from_parts(32_847_495, 3519) - // Standard Error: 24_211 - .saturating_add(Weight::from_parts(13_949_639, 0).saturating_mul(r.into())) + // Minimum execution time: 29_541_000 picoseconds. + Weight::from_parts(32_239_358, 3519) + // Standard Error: 23_406 + .saturating_add(Weight::from_parts(16_030_191, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(4_u64)) - .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(6_u64)) + .saturating_add(T::DbWeight::get().writes((5_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2529).saturating_mul(r.into())) } - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IndexToId (r:0 w:1) - /// Proof: RankedCollective IndexToId (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:0 w:1) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:1) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:0 w:1) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 10]`. fn promote_member(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `314 + r * (17 ±0)` // Estimated: `3507` - // Minimum execution time: 20_333_000 picoseconds. - Weight::from_parts(21_592_224, 3507) - // Standard Error: 6_423 - .saturating_add(Weight::from_parts(321_314, 0).saturating_mul(r.into())) + // Minimum execution time: 17_939_000 picoseconds. 
+ Weight::from_parts(19_290_416, 3507) + // Standard Error: 5_710 + .saturating_add(Weight::from_parts(374_399, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:1 w:1) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: RankedCollective IndexToId (r:1 w:1) - /// Proof: RankedCollective IndexToId (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:1 w:2) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:1 w:2) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 10]`. fn demote_member(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `632 + r * (72 ±0)` // Estimated: `3519` - // Minimum execution time: 29_446_000 picoseconds. - Weight::from_parts(32_447_715, 3519) - // Standard Error: 28_791 - .saturating_add(Weight::from_parts(822_890, 0).saturating_mul(r.into())) + // Minimum execution time: 29_609_000 picoseconds. 
+ Weight::from_parts(32_686_167, 3519) + // Standard Error: 27_588 + .saturating_add(Weight::from_parts(789_212, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: RankedPolls ReferendumInfoFor (r:1 w:1) - /// Proof: RankedPolls ReferendumInfoFor (max_values: None, max_size: Some(330), added: 2805, mode: MaxEncodedLen) - /// Storage: RankedCollective Voting (r:1 w:1) - /// Proof: RankedCollective Voting (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `RankedPolls::ReferendumInfoFor` (r:1 w:1) + /// Proof: `RankedPolls::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(330), added: 2805, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Voting` (r:1 w:1) + /// Proof: `RankedCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn vote() -> Weight { // Proof Size summary in bytes: // Measured: `628` // Estimated: `219984` - // Minimum execution time: 45_474_000 picoseconds. - Weight::from_parts(47_228_000, 219984) + // Minimum execution time: 41_151_000 picoseconds. + Weight::from_parts(42_435_000, 219984) .saturating_add(T::DbWeight::get().reads(5_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: RankedPolls ReferendumInfoFor (r:1 w:0) - /// Proof: RankedPolls ReferendumInfoFor (max_values: None, max_size: Some(330), added: 2805, mode: MaxEncodedLen) - /// Storage: RankedCollective VotingCleanup (r:1 w:0) - /// Proof: RankedCollective VotingCleanup (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: RankedCollective Voting (r:100 w:100) - /// Proof: RankedCollective Voting (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `RankedPolls::ReferendumInfoFor` (r:1 w:0) + /// Proof: `RankedPolls::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(330), added: 2805, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::VotingCleanup` (r:1 w:0) + /// Proof: `RankedCollective::VotingCleanup` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Voting` (r:100 w:100) + /// Proof: `RankedCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 100]`. fn cleanup_poll(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `462 + n * (50 ±0)` // Estimated: `3795 + n * (2540 ±0)` - // Minimum execution time: 13_903_000 picoseconds. 
- Weight::from_parts(18_209_102, 3795) - // Standard Error: 2_556 - .saturating_add(Weight::from_parts(1_237_454, 0).saturating_mul(n.into())) + // Minimum execution time: 13_563_000 picoseconds. + Weight::from_parts(17_495_215, 3795) + // Standard Error: 2_294 + .saturating_add(Weight::from_parts(1_207_393, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 2540).saturating_mul(n.into())) } - /// Storage: `RankedCollective::Members` (r:2 w:2) /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::MemberCount` (r:2 w:2) /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `RankedCollective::IdToIndex` (r:2 w:2) + /// Storage: `RankedCollective::IdToIndex` (r:2 w:4) /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Member` (r:2 w:2) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:0) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) + /// Storage: `Salary::Claimant` (r:2 w:2) + /// Proof: `Salary::Claimant` (`max_values`: None, `max_size`: Some(78), added: 2553, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::IndexToId` (r:0 w:2) /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn exchange_member() -> Weight { // Proof Size summary in bytes: - // Measured: `437` - // Estimated: `6048` - // Minimum execution time: 138_000_000 picoseconds. - Weight::from_parts(141_000_000, 0) - .saturating_add(Weight::from_parts(0, 6048)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(8)) + // Measured: `625` + // Estimated: `19894` + // Minimum execution time: 70_713_000 picoseconds. + Weight::from_parts(72_831_000, 19894) + .saturating_add(T::DbWeight::get().reads(11_u64)) + .saturating_add(T::DbWeight::get().writes(14_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IndexToId (r:0 w:1) - /// Proof: RankedCollective IndexToId (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:0 w:1) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:1) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:0 w:1) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn add_member() -> Weight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3507` - // Minimum execution time: 17_245_000 picoseconds. - Weight::from_parts(17_930_000, 3507) + // Minimum execution time: 15_389_000 picoseconds. + Weight::from_parts(15_901_000, 3507) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:11 w:11) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:11 w:11) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: RankedCollective IndexToId (r:11 w:11) - /// Proof: RankedCollective IndexToId (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:11 w:11) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:11 w:22) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:11 w:22) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 10]`. fn remove_member(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `616 + r * (281 ±0)` // Estimated: `3519 + r * (2529 ±0)` - // Minimum execution time: 29_534_000 picoseconds. 
- Weight::from_parts(32_847_495, 3519) - // Standard Error: 24_211 - .saturating_add(Weight::from_parts(13_949_639, 0).saturating_mul(r.into())) + // Minimum execution time: 29_541_000 picoseconds. + Weight::from_parts(32_239_358, 3519) + // Standard Error: 23_406 + .saturating_add(Weight::from_parts(16_030_191, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes(4_u64)) - .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(RocksDbWeight::get().writes(6_u64)) + .saturating_add(RocksDbWeight::get().writes((5_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2529).saturating_mul(r.into())) } - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IndexToId (r:0 w:1) - /// Proof: RankedCollective IndexToId (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:0 w:1) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:1) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:0 w:1) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 10]`. fn promote_member(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `314 + r * (17 ±0)` // Estimated: `3507` - // Minimum execution time: 20_333_000 picoseconds. - Weight::from_parts(21_592_224, 3507) - // Standard Error: 6_423 - .saturating_add(Weight::from_parts(321_314, 0).saturating_mul(r.into())) + // Minimum execution time: 17_939_000 picoseconds. 
+ Weight::from_parts(19_290_416, 3507) + // Standard Error: 5_710 + .saturating_add(Weight::from_parts(374_399, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:1 w:1) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: RankedCollective IndexToId (r:1 w:1) - /// Proof: RankedCollective IndexToId (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:1 w:2) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:1 w:2) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 10]`. fn demote_member(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `632 + r * (72 ±0)` // Estimated: `3519` - // Minimum execution time: 29_446_000 picoseconds. - Weight::from_parts(32_447_715, 3519) - // Standard Error: 28_791 - .saturating_add(Weight::from_parts(822_890, 0).saturating_mul(r.into())) + // Minimum execution time: 29_609_000 picoseconds. 
+ Weight::from_parts(32_686_167, 3519) + // Standard Error: 27_588 + .saturating_add(Weight::from_parts(789_212, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: RankedPolls ReferendumInfoFor (r:1 w:1) - /// Proof: RankedPolls ReferendumInfoFor (max_values: None, max_size: Some(330), added: 2805, mode: MaxEncodedLen) - /// Storage: RankedCollective Voting (r:1 w:1) - /// Proof: RankedCollective Voting (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `RankedPolls::ReferendumInfoFor` (r:1 w:1) + /// Proof: `RankedPolls::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(330), added: 2805, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Voting` (r:1 w:1) + /// Proof: `RankedCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn vote() -> Weight { // Proof Size summary in bytes: // Measured: `628` // Estimated: `219984` - // Minimum execution time: 45_474_000 picoseconds. - Weight::from_parts(47_228_000, 219984) + // Minimum execution time: 41_151_000 picoseconds. + Weight::from_parts(42_435_000, 219984) .saturating_add(RocksDbWeight::get().reads(5_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: RankedPolls ReferendumInfoFor (r:1 w:0) - /// Proof: RankedPolls ReferendumInfoFor (max_values: None, max_size: Some(330), added: 2805, mode: MaxEncodedLen) - /// Storage: RankedCollective VotingCleanup (r:1 w:0) - /// Proof: RankedCollective VotingCleanup (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: RankedCollective Voting (r:100 w:100) - /// Proof: RankedCollective Voting (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `RankedPolls::ReferendumInfoFor` (r:1 w:0) + /// Proof: `RankedPolls::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(330), added: 2805, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::VotingCleanup` (r:1 w:0) + /// Proof: `RankedCollective::VotingCleanup` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Voting` (r:100 w:100) + /// Proof: `RankedCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 100]`. fn cleanup_poll(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `462 + n * (50 ±0)` // Estimated: `3795 + n * (2540 ±0)` - // Minimum execution time: 13_903_000 picoseconds. 
- Weight::from_parts(18_209_102, 3795) - // Standard Error: 2_556 - .saturating_add(Weight::from_parts(1_237_454, 0).saturating_mul(n.into())) + // Minimum execution time: 13_563_000 picoseconds. + Weight::from_parts(17_495_215, 3795) + // Standard Error: 2_294 + .saturating_add(Weight::from_parts(1_207_393, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 2540).saturating_mul(n.into())) } - /// Storage: `RankedCollective::Members` (r:2 w:2) /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::MemberCount` (r:2 w:2) /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `RankedCollective::IdToIndex` (r:2 w:2) + /// Storage: `RankedCollective::IdToIndex` (r:2 w:4) /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Member` (r:2 w:2) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:0) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) + /// Storage: `Salary::Claimant` (r:2 w:2) + /// Proof: `Salary::Claimant` (`max_values`: None, `max_size`: Some(78), added: 2553, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::IndexToId` (r:0 w:2) /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn exchange_member() -> Weight { // Proof Size summary in bytes: - // Measured: `437` - // Estimated: `6048` - // Minimum execution time: 138_000_000 picoseconds. - Weight::from_parts(141_000_000, 0) - .saturating_add(Weight::from_parts(0, 6048)) - .saturating_add(RocksDbWeight::get().reads(6)) - .saturating_add(RocksDbWeight::get().writes(8)) + // Measured: `625` + // Estimated: `19894` + // Minimum execution time: 70_713_000 picoseconds. + Weight::from_parts(72_831_000, 19894) + .saturating_add(RocksDbWeight::get().reads(11_u64)) + .saturating_add(RocksDbWeight::get().writes(14_u64)) } } diff --git a/substrate/frame/recovery/src/weights.rs b/substrate/frame/recovery/src/weights.rs index 84b19ae694eec1107b9229c1894646cb8368bc0d..fe5dbcfc2222d2a53cf1f6d7438bb2175470d1f2 100644 --- a/substrate/frame/recovery/src/weights.rs +++ b/substrate/frame/recovery/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_recovery +//! Autogenerated weights for `pallet_recovery` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! 
HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/recovery/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/recovery/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_recovery. +/// Weight functions needed for `pallet_recovery`. pub trait WeightInfo { fn as_recovered() -> Weight; fn set_recovered() -> Weight; @@ -63,258 +62,266 @@ fn cancel_recovered() -> Weight; } -/// Weights for pallet_recovery using the Substrate node and recommended hardware. +/// Weights for `pallet_recovery` using the Substrate node and recommended hardware. pub struct SubstrateWeight<T>(PhantomData<T>); impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { - /// Storage: Recovery Proxy (r:1 w:0) - /// Proof: Recovery Proxy (max_values: None, max_size: Some(80), added: 2555, mode: MaxEncodedLen) + /// Storage: `Recovery::Proxy` (r:1 w:0) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) fn as_recovered() -> Weight { // Proof Size summary in bytes: - // Measured: `281` - // Estimated: `3545` - // Minimum execution time: 9_360_000 picoseconds. - Weight::from_parts(9_773_000, 3545) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Measured: `497` + // Estimated: `3997` + // Minimum execution time: 14_898_000 picoseconds. + Weight::from_parts(15_464_000, 3997) + .saturating_add(T::DbWeight::get().reads(3_u64)) } - /// Storage: Recovery Proxy (r:0 w:1) - /// Proof: Recovery Proxy (max_values: None, max_size: Some(80), added: 2555, mode: MaxEncodedLen) + /// Storage: `Recovery::Proxy` (r:0 w:1) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) fn set_recovered() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_146_000 picoseconds. - Weight::from_parts(9_507_000, 0) + // Minimum execution time: 7_424_000 picoseconds. + Weight::from_parts(7_830_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Recovery Recoverable (r:1 w:1) - /// Proof: Recovery Recoverable (max_values: None, max_size: Some(351), added: 2826, mode: MaxEncodedLen) + /// Storage: `Recovery::Recoverable` (r:1 w:1) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 9]`.
fn create_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `175` + // Measured: `246` // Estimated: `3816` - // Minimum execution time: 26_472_000 picoseconds. - Weight::from_parts(27_917_651, 3816) - // Standard Error: 7_129 - .saturating_add(Weight::from_parts(59_239, 0).saturating_mul(n.into())) + // Minimum execution time: 21_968_000 picoseconds. + Weight::from_parts(23_594_232, 3816) + // Standard Error: 5_848 + .saturating_add(Weight::from_parts(24_843, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Recovery Recoverable (r:1 w:0) - /// Proof: Recovery Recoverable (max_values: None, max_size: Some(351), added: 2826, mode: MaxEncodedLen) - /// Storage: Recovery ActiveRecoveries (r:1 w:1) - /// Proof: Recovery ActiveRecoveries (max_values: None, max_size: Some(389), added: 2864, mode: MaxEncodedLen) + /// Storage: `Recovery::Recoverable` (r:1 w:0) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:1) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) fn initiate_recovery() -> Weight { // Proof Size summary in bytes: - // Measured: `272` + // Measured: `343` // Estimated: `3854` - // Minimum execution time: 29_618_000 picoseconds. - Weight::from_parts(30_192_000, 3854) + // Minimum execution time: 25_494_000 picoseconds. + Weight::from_parts(26_290_000, 3854) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Recovery Recoverable (r:1 w:0) - /// Proof: Recovery Recoverable (max_values: None, max_size: Some(351), added: 2826, mode: MaxEncodedLen) - /// Storage: Recovery ActiveRecoveries (r:1 w:1) - /// Proof: Recovery ActiveRecoveries (max_values: None, max_size: Some(389), added: 2864, mode: MaxEncodedLen) + /// Storage: `Recovery::Recoverable` (r:1 w:0) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:1) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 9]`. fn vouch_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `360 + n * (64 ±0)` + // Measured: `431 + n * (64 ±0)` // Estimated: `3854` - // Minimum execution time: 19_464_000 picoseconds. - Weight::from_parts(20_642_522, 3854) - // Standard Error: 5_974 - .saturating_add(Weight::from_parts(142_308, 0).saturating_mul(n.into())) + // Minimum execution time: 17_044_000 picoseconds. 
+ Weight::from_parts(18_299_617, 3854) + // Standard Error: 5_580 + .saturating_add(Weight::from_parts(187_568, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Recovery Recoverable (r:1 w:0) - /// Proof: Recovery Recoverable (max_values: None, max_size: Some(351), added: 2826, mode: MaxEncodedLen) - /// Storage: Recovery ActiveRecoveries (r:1 w:0) - /// Proof: Recovery ActiveRecoveries (max_values: None, max_size: Some(389), added: 2864, mode: MaxEncodedLen) - /// Storage: Recovery Proxy (r:1 w:1) - /// Proof: Recovery Proxy (max_values: None, max_size: Some(80), added: 2555, mode: MaxEncodedLen) + /// Storage: `Recovery::Recoverable` (r:1 w:0) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:0) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// Storage: `Recovery::Proxy` (r:1 w:1) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 9]`. fn claim_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `392 + n * (64 ±0)` + // Measured: `463 + n * (64 ±0)` // Estimated: `3854` - // Minimum execution time: 23_656_000 picoseconds. - Weight::from_parts(24_903_269, 3854) - // Standard Error: 5_771 - .saturating_add(Weight::from_parts(117_343, 0).saturating_mul(n.into())) + // Minimum execution time: 22_186_000 picoseconds. + Weight::from_parts(23_476_807, 3854) + // Standard Error: 6_392 + .saturating_add(Weight::from_parts(89_770, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Recovery ActiveRecoveries (r:1 w:1) - /// Proof: Recovery ActiveRecoveries (max_values: None, max_size: Some(389), added: 2864, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:1) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 9]`. fn close_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `513 + n * (32 ±0)` + // Measured: `584 + n * (32 ±0)` // Estimated: `3854` - // Minimum execution time: 34_866_000 picoseconds. - Weight::from_parts(36_368_748, 3854) - // Standard Error: 6_600 - .saturating_add(Weight::from_parts(118_610, 0).saturating_mul(n.into())) + // Minimum execution time: 31_045_000 picoseconds. 
+ Weight::from_parts(32_623_578, 3854) + // Standard Error: 7_203 + .saturating_add(Weight::from_parts(61_466, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Recovery ActiveRecoveries (r:1 w:0) - /// Proof: Recovery ActiveRecoveries (max_values: None, max_size: Some(389), added: 2864, mode: MaxEncodedLen) - /// Storage: Recovery Recoverable (r:1 w:1) - /// Proof: Recovery Recoverable (max_values: None, max_size: Some(351), added: 2826, mode: MaxEncodedLen) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:0) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// Storage: `Recovery::Recoverable` (r:1 w:1) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 9]`. fn remove_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `270 + n * (32 ±0)` + // Measured: `341 + n * (32 ±0)` // Estimated: `3854` - // Minimum execution time: 31_405_000 picoseconds. - Weight::from_parts(32_552_838, 3854) - // Standard Error: 8_043 - .saturating_add(Weight::from_parts(171_605, 0).saturating_mul(n.into())) + // Minimum execution time: 27_925_000 picoseconds. + Weight::from_parts(29_258_264, 3854) + // Standard Error: 8_192 + .saturating_add(Weight::from_parts(128_124, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Recovery Proxy (r:1 w:1) - /// Proof: Recovery Proxy (max_values: None, max_size: Some(80), added: 2555, mode: MaxEncodedLen) + /// Storage: `Recovery::Proxy` (r:1 w:1) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) fn cancel_recovered() -> Weight { // Proof Size summary in bytes: - // Measured: `281` + // Measured: `352` // Estimated: `3545` - // Minimum execution time: 11_530_000 picoseconds. - Weight::from_parts(11_851_000, 3545) + // Minimum execution time: 11_623_000 picoseconds. + Weight::from_parts(12_043_000, 3545) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Recovery Proxy (r:1 w:0) - /// Proof: Recovery Proxy (max_values: None, max_size: Some(80), added: 2555, mode: MaxEncodedLen) + /// Storage: `Recovery::Proxy` (r:1 w:0) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) fn as_recovered() -> Weight { // Proof Size summary in bytes: - // Measured: `281` - // Estimated: `3545` - // Minimum execution time: 9_360_000 picoseconds. - Weight::from_parts(9_773_000, 3545) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Measured: `497` + // Estimated: `3997` + // Minimum execution time: 14_898_000 picoseconds. 
+ Weight::from_parts(15_464_000, 3997) + .saturating_add(RocksDbWeight::get().reads(3_u64)) } - /// Storage: Recovery Proxy (r:0 w:1) - /// Proof: Recovery Proxy (max_values: None, max_size: Some(80), added: 2555, mode: MaxEncodedLen) + /// Storage: `Recovery::Proxy` (r:0 w:1) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) fn set_recovered() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_146_000 picoseconds. - Weight::from_parts(9_507_000, 0) + // Minimum execution time: 7_424_000 picoseconds. + Weight::from_parts(7_830_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Recovery Recoverable (r:1 w:1) - /// Proof: Recovery Recoverable (max_values: None, max_size: Some(351), added: 2826, mode: MaxEncodedLen) + /// Storage: `Recovery::Recoverable` (r:1 w:1) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 9]`. fn create_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `175` + // Measured: `246` // Estimated: `3816` - // Minimum execution time: 26_472_000 picoseconds. - Weight::from_parts(27_917_651, 3816) - // Standard Error: 7_129 - .saturating_add(Weight::from_parts(59_239, 0).saturating_mul(n.into())) + // Minimum execution time: 21_968_000 picoseconds. + Weight::from_parts(23_594_232, 3816) + // Standard Error: 5_848 + .saturating_add(Weight::from_parts(24_843, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Recovery Recoverable (r:1 w:0) - /// Proof: Recovery Recoverable (max_values: None, max_size: Some(351), added: 2826, mode: MaxEncodedLen) - /// Storage: Recovery ActiveRecoveries (r:1 w:1) - /// Proof: Recovery ActiveRecoveries (max_values: None, max_size: Some(389), added: 2864, mode: MaxEncodedLen) + /// Storage: `Recovery::Recoverable` (r:1 w:0) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:1) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) fn initiate_recovery() -> Weight { // Proof Size summary in bytes: - // Measured: `272` + // Measured: `343` // Estimated: `3854` - // Minimum execution time: 29_618_000 picoseconds. - Weight::from_parts(30_192_000, 3854) + // Minimum execution time: 25_494_000 picoseconds. + Weight::from_parts(26_290_000, 3854) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Recovery Recoverable (r:1 w:0) - /// Proof: Recovery Recoverable (max_values: None, max_size: Some(351), added: 2826, mode: MaxEncodedLen) - /// Storage: Recovery ActiveRecoveries (r:1 w:1) - /// Proof: Recovery ActiveRecoveries (max_values: None, max_size: Some(389), added: 2864, mode: MaxEncodedLen) + /// Storage: `Recovery::Recoverable` (r:1 w:0) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:1) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 9]`. 
fn vouch_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `360 + n * (64 ±0)` + // Measured: `431 + n * (64 ±0)` // Estimated: `3854` - // Minimum execution time: 19_464_000 picoseconds. - Weight::from_parts(20_642_522, 3854) - // Standard Error: 5_974 - .saturating_add(Weight::from_parts(142_308, 0).saturating_mul(n.into())) + // Minimum execution time: 17_044_000 picoseconds. + Weight::from_parts(18_299_617, 3854) + // Standard Error: 5_580 + .saturating_add(Weight::from_parts(187_568, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Recovery Recoverable (r:1 w:0) - /// Proof: Recovery Recoverable (max_values: None, max_size: Some(351), added: 2826, mode: MaxEncodedLen) - /// Storage: Recovery ActiveRecoveries (r:1 w:0) - /// Proof: Recovery ActiveRecoveries (max_values: None, max_size: Some(389), added: 2864, mode: MaxEncodedLen) - /// Storage: Recovery Proxy (r:1 w:1) - /// Proof: Recovery Proxy (max_values: None, max_size: Some(80), added: 2555, mode: MaxEncodedLen) + /// Storage: `Recovery::Recoverable` (r:1 w:0) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:0) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// Storage: `Recovery::Proxy` (r:1 w:1) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 9]`. fn claim_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `392 + n * (64 ±0)` + // Measured: `463 + n * (64 ±0)` // Estimated: `3854` - // Minimum execution time: 23_656_000 picoseconds. - Weight::from_parts(24_903_269, 3854) - // Standard Error: 5_771 - .saturating_add(Weight::from_parts(117_343, 0).saturating_mul(n.into())) + // Minimum execution time: 22_186_000 picoseconds. + Weight::from_parts(23_476_807, 3854) + // Standard Error: 6_392 + .saturating_add(Weight::from_parts(89_770, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Recovery ActiveRecoveries (r:1 w:1) - /// Proof: Recovery ActiveRecoveries (max_values: None, max_size: Some(389), added: 2864, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:1) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 9]`. fn close_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `513 + n * (32 ±0)` + // Measured: `584 + n * (32 ±0)` // Estimated: `3854` - // Minimum execution time: 34_866_000 picoseconds. - Weight::from_parts(36_368_748, 3854) - // Standard Error: 6_600 - .saturating_add(Weight::from_parts(118_610, 0).saturating_mul(n.into())) + // Minimum execution time: 31_045_000 picoseconds. 
+ Weight::from_parts(32_623_578, 3854) + // Standard Error: 7_203 + .saturating_add(Weight::from_parts(61_466, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Recovery ActiveRecoveries (r:1 w:0) - /// Proof: Recovery ActiveRecoveries (max_values: None, max_size: Some(389), added: 2864, mode: MaxEncodedLen) - /// Storage: Recovery Recoverable (r:1 w:1) - /// Proof: Recovery Recoverable (max_values: None, max_size: Some(351), added: 2826, mode: MaxEncodedLen) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:0) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// Storage: `Recovery::Recoverable` (r:1 w:1) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 9]`. fn remove_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `270 + n * (32 ±0)` + // Measured: `341 + n * (32 ±0)` // Estimated: `3854` - // Minimum execution time: 31_405_000 picoseconds. - Weight::from_parts(32_552_838, 3854) - // Standard Error: 8_043 - .saturating_add(Weight::from_parts(171_605, 0).saturating_mul(n.into())) + // Minimum execution time: 27_925_000 picoseconds. + Weight::from_parts(29_258_264, 3854) + // Standard Error: 8_192 + .saturating_add(Weight::from_parts(128_124, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Recovery Proxy (r:1 w:1) - /// Proof: Recovery Proxy (max_values: None, max_size: Some(80), added: 2555, mode: MaxEncodedLen) + /// Storage: `Recovery::Proxy` (r:1 w:1) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) fn cancel_recovered() -> Weight { // Proof Size summary in bytes: - // Measured: `281` + // Measured: `352` // Estimated: `3545` - // Minimum execution time: 11_530_000 picoseconds. - Weight::from_parts(11_851_000, 3545) + // Minimum execution time: 11_623_000 picoseconds. + Weight::from_parts(12_043_000, 3545) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/referenda/src/lib.rs b/substrate/frame/referenda/src/lib.rs index c5bf2266e672542b77c79bee71176f8e526018d4..e616056c3022abe796cd494e91fa4e9565843e60 100644 --- a/substrate/frame/referenda/src/lib.rs +++ b/substrate/frame/referenda/src/lib.rs @@ -143,7 +143,7 @@ pub mod pallet { use frame_support::{pallet_prelude::*, traits::EnsureOriginWithArg}; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. 
const STORAGE_VERSION: StorageVersion = StorageVersion::new(1);
 #[pallet::pallet]
diff --git a/substrate/frame/referenda/src/migration.rs b/substrate/frame/referenda/src/migration.rs
index a80897242eec67aad194f8deaf1cd6527533eab9..631eb7340e567e8115e1755d513980fb148700c5 100644
--- a/substrate/frame/referenda/src/migration.rs
+++ b/substrate/frame/referenda/src/migration.rs
@@ -109,16 +109,16 @@ pub mod v1 {
 }
 fn on_runtime_upgrade() -> Weight {
- let current_version = Pallet::<T>::current_storage_version();
- let onchain_version = Pallet::<T>::on_chain_storage_version();
+ let in_code_version = Pallet::<T>::in_code_storage_version();
+ let on_chain_version = Pallet::<T>::on_chain_storage_version();
 let mut weight = T::DbWeight::get().reads(1);
 log::info!(
 target: TARGET,
- "running migration with current storage version {:?} / onchain {:?}.",
- current_version,
- onchain_version
+ "running migration with in-code storage version {:?} / onchain {:?}.",
+ in_code_version,
+ on_chain_version
 );
- if onchain_version != 0 {
+ if on_chain_version != 0 {
 log::warn!(target: TARGET, "skipping migration from v0 to v1.");
 return weight
 }
@@ -149,8 +149,8 @@ pub mod v1 {
 #[cfg(feature = "try-runtime")]
 fn post_upgrade(state: Vec<u8>) -> Result<(), TryRuntimeError> {
- let onchain_version = Pallet::<T>::on_chain_storage_version();
- ensure!(onchain_version == 1, "must upgrade from version 0 to 1.");
+ let on_chain_version = Pallet::<T>::on_chain_storage_version();
+ ensure!(on_chain_version == 1, "must upgrade from version 0 to 1.");
 let pre_referendum_count: u32 = Decode::decode(&mut &state[..])
 .expect("failed to decode the state from pre-upgrade.");
 let post_referendum_count = ReferendumInfoFor::<T>::iter().count() as u32;
diff --git a/substrate/frame/referenda/src/weights.rs b/substrate/frame/referenda/src/weights.rs
index 4b89379b311da6ac8cd10fd84a127e3693ab00de..1f1b69873ebc44fc045ac8f7864e8c2397e927cf 100644
--- a/substrate/frame/referenda/src/weights.rs
+++ b/substrate/frame/referenda/src/weights.rs
@@ -15,16 +15,16 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-//! Autogenerated weights for pallet_referenda
+//! Autogenerated weights for `pallet_referenda`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//!
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`
 // Executed Command:
-// ./target/production/substrate
+// ./target/production/substrate-node
 // benchmark
 // pallet
 // --chain=dev
@@ -35,12 +35,11 @@
 // --no-median-slopes
 // --no-min-squares
 // --extrinsic=*
-// --execution=wasm
 // --wasm-execution=compiled
 // --heap-pages=4096
-// --output=./frame/referenda/src/weights.rs
-// --header=./HEADER-APACHE2
-// --template=./.maintain/frame-weight-template.hbs
+// --output=./substrate/frame/referenda/src/weights.rs
+// --header=./substrate/HEADER-APACHE2
+// --template=./substrate/.maintain/frame-weight-template.hbs
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -50,7 +49,7 @@
 use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
 use core::marker::PhantomData;
-/// Weight functions needed for pallet_referenda.
+/// Weight functions needed for `pallet_referenda`.
 pub trait WeightInfo {
 fn submit() -> Weight;
 fn place_decision_deposit_preparing() -> Weight;
@@ -84,842 +83,874 @@ pub trait WeightInfo {
 fn clear_metadata() -> Weight;
 }
-/// Weights for pallet_referenda using the Substrate node and recommended hardware.
+/// Weights for `pallet_referenda` using the Substrate node and recommended hardware.
 pub struct SubstrateWeight<T>(PhantomData<T>);
 impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
- /// Storage: Referenda ReferendumCount (r:1 w:1)
- /// Proof: Referenda ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
- /// Storage: Scheduler Agenda (r:1 w:1)
- /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen)
- /// Storage: Referenda ReferendumInfoFor (r:0 w:1)
- /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen)
+ /// Storage: `Referenda::ReferendumCount` (r:1 w:1)
+ /// Proof: `Referenda::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Agenda` (r:1 w:1)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`)
+ /// Storage: `Referenda::ReferendumInfoFor` (r:0 w:1)
+ /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`)
 fn submit() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `220`
+ // Measured: `286`
 // Estimated: `110487`
- // Minimum execution time: 40_175_000 picoseconds.
- Weight::from_parts(41_107_000, 110487)
+ // Minimum execution time: 31_536_000 picoseconds.
+ Weight::from_parts(32_797_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `473` + // Measured: `539` // Estimated: `219984` - // Minimum execution time: 50_922_000 picoseconds. - Weight::from_parts(52_179_000, 219984) + // Minimum execution time: 42_579_000 picoseconds. + Weight::from_parts(43_877_000, 219984) .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:0) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3260` + // Measured: `3326` // Estimated: `110487` - // Minimum execution time: 69_559_000 picoseconds. - Weight::from_parts(72_143_000, 110487) + // Minimum execution time: 61_439_000 picoseconds. 
+ Weight::from_parts(63_681_000, 110487) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:0) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3280` + // Measured: `3346` // Estimated: `110487` - // Minimum execution time: 68_833_000 picoseconds. - Weight::from_parts(70_987_000, 110487) + // Minimum execution time: 60_136_000 picoseconds. + Weight::from_parts(61_841_000, 110487) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:1) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `473` + // Measured: `539` // Estimated: `219984` - // Minimum execution time: 61_794_000 picoseconds. 
- Weight::from_parts(62_846_000, 219984) + // Minimum execution time: 49_865_000 picoseconds. + Weight::from_parts(51_347_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) - } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:1) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + .saturating_add(T::DbWeight::get().writes(5_u64)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `473` + // Measured: `539` // Estimated: `219984` - // Minimum execution time: 58_664_000 picoseconds. - Weight::from_parts(60_195_000, 219984) + // Minimum execution time: 47_887_000 picoseconds. + Weight::from_parts(49_927_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) fn refund_decision_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `351` + // Measured: `417` // Estimated: `3831` - // Minimum execution time: 30_850_000 picoseconds. - Weight::from_parts(32_130_000, 3831) + // Minimum execution time: 25_721_000 picoseconds. + Weight::from_parts(26_922_000, 3831) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) fn refund_submission_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `341` + // Measured: `407` // Estimated: `3831` - // Minimum execution time: 30_747_000 picoseconds. - Weight::from_parts(32_196_000, 3831) + // Minimum execution time: 25_840_000 picoseconds. 
+ Weight::from_parts(26_498_000, 3831) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn cancel() -> Weight { // Proof Size summary in bytes: - // Measured: `381` + // Measured: `447` // Estimated: `219984` - // Minimum execution time: 36_139_000 picoseconds. - Weight::from_parts(37_252_000, 219984) + // Minimum execution time: 30_530_000 picoseconds. + Weight::from_parts(31_547_000, 219984) .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) - /// Storage: Referenda MetadataOf (r:1 w:0) - /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Referenda::MetadataOf` (r:1 w:0) + /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn kill() -> Weight { // Proof Size summary in bytes: - // Measured: `622` + // Measured: `688` // Estimated: `219984` - // Minimum execution time: 80_862_000 picoseconds. - Weight::from_parts(83_045_000, 219984) + // Minimum execution time: 64_011_000 picoseconds. 
+ Weight::from_parts(65_240_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Referenda TrackQueue (r:1 w:0) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:1) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: `Referenda::TrackQueue` (r:1 w:0) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) fn one_fewer_deciding_queue_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `240` // Estimated: `5477` - // Minimum execution time: 10_136_000 picoseconds. - Weight::from_parts(10_638_000, 5477) + // Minimum execution time: 9_568_000 picoseconds. + Weight::from_parts(10_004_000, 5477) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn one_fewer_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `3150` + // Measured: `3216` // Estimated: `110487` - // Minimum execution time: 52_022_000 picoseconds. - Weight::from_parts(53_910_000, 110487) + // Minimum execution time: 43_325_000 picoseconds. 
+ Weight::from_parts(45_335_000, 110487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn one_fewer_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `3150` + // Measured: `3216` // Estimated: `110487` - // Minimum execution time: 53_683_000 picoseconds. - Weight::from_parts(55_707_000, 110487) + // Minimum execution time: 44_287_000 picoseconds. + Weight::from_parts(45_164_000, 110487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:0) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_requeued_insertion() -> Weight { // Proof Size summary in bytes: - // Measured: `3011` + // Measured: `3077` // Estimated: `5477` - // Minimum execution time: 24_043_000 picoseconds. - Weight::from_parts(24_512_000, 5477) + // Minimum execution time: 21_909_000 picoseconds. + Weight::from_parts(22_641_000, 5477) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:0) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_requeued_slide() -> Weight { // Proof Size summary in bytes: - // Measured: `3011` + // Measured: `3077` // Estimated: `5477` - // Minimum execution time: 23_588_000 picoseconds. 
- Weight::from_parts(24_422_000, 5477) + // Minimum execution time: 21_626_000 picoseconds. + Weight::from_parts(22_338_000, 5477) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:0) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3015` + // Measured: `3081` // Estimated: `5477` - // Minimum execution time: 31_443_000 picoseconds. - Weight::from_parts(32_725_000, 5477) + // Minimum execution time: 29_245_000 picoseconds. + Weight::from_parts(30_147_000, 5477) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:0) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3035` + // Measured: `3101` // Estimated: `5477` - // Minimum execution time: 30_319_000 picoseconds. - Weight::from_parts(31_652_000, 5477) + // Minimum execution time: 28_512_000 picoseconds. 
+ Weight::from_parts(29_781_000, 5477) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_no_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `333` + // Measured: `399` // Estimated: `110487` - // Minimum execution time: 23_062_000 picoseconds. - Weight::from_parts(23_614_000, 110487) + // Minimum execution time: 19_208_000 picoseconds. + Weight::from_parts(19_947_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `381` + // Measured: `447` // Estimated: `110487` - // Minimum execution time: 23_537_000 picoseconds. - Weight::from_parts(24_267_000, 110487) + // Minimum execution time: 19_708_000 picoseconds. + Weight::from_parts(20_783_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) fn nudge_referendum_timed_out() -> Weight { // Proof Size summary in bytes: - // Measured: `278` + // Measured: `344` // Estimated: `3831` - // Minimum execution time: 16_388_000 picoseconds. - Weight::from_parts(16_676_000, 3831) + // Minimum execution time: 12_947_000 picoseconds. 
+ Weight::from_parts(13_582_000, 3831) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:1) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_begin_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `381` + // Measured: `447` // Estimated: `110487` - // Minimum execution time: 32_801_000 picoseconds. - Weight::from_parts(34_053_000, 110487) + // Minimum execution time: 26_699_000 picoseconds. + Weight::from_parts(28_048_000, 110487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:1) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_begin_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `381` + // Measured: `447` // Estimated: `110487` - // Minimum execution time: 35_704_000 picoseconds. - Weight::from_parts(36_451_000, 110487) + // Minimum execution time: 28_132_000 picoseconds. 
+ Weight::from_parts(29_520_000, 110487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_begin_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `434` + // Measured: `500` // Estimated: `110487` - // Minimum execution time: 29_151_000 picoseconds. - Weight::from_parts(30_055_000, 110487) + // Minimum execution time: 23_212_000 picoseconds. + Weight::from_parts(24_200_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_end_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `417` + // Measured: `483` // Estimated: `110487` - // Minimum execution time: 29_265_000 picoseconds. - Weight::from_parts(30_213_000, 110487) + // Minimum execution time: 22_807_000 picoseconds. + Weight::from_parts(23_858_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_continue_not_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `434` + // Measured: `500` // Estimated: `110487` - // Minimum execution time: 27_760_000 picoseconds. - Weight::from_parts(28_381_000, 110487) + // Minimum execution time: 22_862_000 picoseconds. 
+ Weight::from_parts(23_627_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_continue_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `438` + // Measured: `504` // Estimated: `110487` - // Minimum execution time: 25_464_000 picoseconds. - Weight::from_parts(26_348_000, 110487) + // Minimum execution time: 21_030_000 picoseconds. + Weight::from_parts(21_710_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) - /// Storage: Scheduler Lookup (r:1 w:1) - /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn nudge_referendum_approved() -> Weight { // Proof Size summary in bytes: - // Measured: `438` + // Measured: `504` // Estimated: `219984` - // Minimum execution time: 42_629_000 picoseconds. - Weight::from_parts(43_732_000, 219984) + // Minimum execution time: 33_031_000 picoseconds. + Weight::from_parts(34_518_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_rejected() -> Weight { // Proof Size summary in bytes: - // Measured: `434` + // Measured: `500` // Estimated: `110487` - // Minimum execution time: 30_015_000 picoseconds. 
- Weight::from_parts(30_827_000, 110487) + // Minimum execution time: 23_198_000 picoseconds. + Weight::from_parts(24_238_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:0) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:0) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Referenda MetadataOf (r:0 w:1) - /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:0) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Referenda::MetadataOf` (r:0 w:1) + /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn set_some_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `422` + // Measured: `555` // Estimated: `3831` - // Minimum execution time: 19_901_000 picoseconds. - Weight::from_parts(20_681_000, 3831) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Minimum execution time: 19_502_000 picoseconds. + Weight::from_parts(20_105_000, 3831) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:0) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda MetadataOf (r:1 w:1) - /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::MetadataOf` (r:1 w:1) + /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `355` + // Measured: `421` // Estimated: `3831` - // Minimum execution time: 17_323_000 picoseconds. - Weight::from_parts(18_227_000, 3831) + // Minimum execution time: 15_417_000 picoseconds. + Weight::from_parts(15_916_000, 3831) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: Referenda ReferendumCount (r:1 w:1) - /// Proof: Referenda ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:0 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumCount` (r:1 w:1) + /// Proof: `Referenda::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:0 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) fn submit() -> Weight { // Proof Size summary in bytes: - // Measured: `220` + // Measured: `286` // Estimated: `110487` - // Minimum execution time: 40_175_000 picoseconds. - Weight::from_parts(41_107_000, 110487) + // Minimum execution time: 31_536_000 picoseconds. + Weight::from_parts(32_797_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `473` + // Measured: `539` // Estimated: `219984` - // Minimum execution time: 50_922_000 picoseconds. - Weight::from_parts(52_179_000, 219984) + // Minimum execution time: 42_579_000 picoseconds. 
+ Weight::from_parts(43_877_000, 219984) .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:0) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3260` + // Measured: `3326` // Estimated: `110487` - // Minimum execution time: 69_559_000 picoseconds. - Weight::from_parts(72_143_000, 110487) + // Minimum execution time: 61_439_000 picoseconds. 
+ Weight::from_parts(63_681_000, 110487) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:0) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3280` + // Measured: `3346` // Estimated: `110487` - // Minimum execution time: 68_833_000 picoseconds. - Weight::from_parts(70_987_000, 110487) + // Minimum execution time: 60_136_000 picoseconds. + Weight::from_parts(61_841_000, 110487) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:1) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `473` + // Measured: `539` // Estimated: `219984` - // Minimum execution time: 61_794_000 picoseconds. 
- Weight::from_parts(62_846_000, 219984) + // Minimum execution time: 49_865_000 picoseconds. + Weight::from_parts(51_347_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) - } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:1) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + .saturating_add(RocksDbWeight::get().writes(5_u64)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `473` + // Measured: `539` // Estimated: `219984` - // Minimum execution time: 58_664_000 picoseconds. - Weight::from_parts(60_195_000, 219984) + // Minimum execution time: 47_887_000 picoseconds. + Weight::from_parts(49_927_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) fn refund_decision_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `351` + // Measured: `417` // Estimated: `3831` - // Minimum execution time: 30_850_000 picoseconds. - Weight::from_parts(32_130_000, 3831) + // Minimum execution time: 25_721_000 picoseconds. + Weight::from_parts(26_922_000, 3831) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) fn refund_submission_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `341` + // Measured: `407` // Estimated: `3831` - // Minimum execution time: 30_747_000 picoseconds. - Weight::from_parts(32_196_000, 3831) + // Minimum execution time: 25_840_000 picoseconds. 
+ Weight::from_parts(26_498_000, 3831) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn cancel() -> Weight { // Proof Size summary in bytes: - // Measured: `381` + // Measured: `447` // Estimated: `219984` - // Minimum execution time: 36_139_000 picoseconds. - Weight::from_parts(37_252_000, 219984) + // Minimum execution time: 30_530_000 picoseconds. + Weight::from_parts(31_547_000, 219984) .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) - /// Storage: Referenda MetadataOf (r:1 w:0) - /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Referenda::MetadataOf` (r:1 w:0) + /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn kill() -> Weight { // Proof Size summary in bytes: - // Measured: `622` + // Measured: `688` // Estimated: `219984` - // Minimum execution time: 80_862_000 picoseconds. - Weight::from_parts(83_045_000, 219984) + // Minimum execution time: 64_011_000 picoseconds. 
+ Weight::from_parts(65_240_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Referenda TrackQueue (r:1 w:0) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:1) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: `Referenda::TrackQueue` (r:1 w:0) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) fn one_fewer_deciding_queue_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `240` // Estimated: `5477` - // Minimum execution time: 10_136_000 picoseconds. - Weight::from_parts(10_638_000, 5477) + // Minimum execution time: 9_568_000 picoseconds. + Weight::from_parts(10_004_000, 5477) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn one_fewer_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `3150` + // Measured: `3216` // Estimated: `110487` - // Minimum execution time: 52_022_000 picoseconds. - Weight::from_parts(53_910_000, 110487) + // Minimum execution time: 43_325_000 picoseconds. 
+ Weight::from_parts(45_335_000, 110487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn one_fewer_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `3150` + // Measured: `3216` // Estimated: `110487` - // Minimum execution time: 53_683_000 picoseconds. - Weight::from_parts(55_707_000, 110487) + // Minimum execution time: 44_287_000 picoseconds. + Weight::from_parts(45_164_000, 110487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:0) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_requeued_insertion() -> Weight { // Proof Size summary in bytes: - // Measured: `3011` + // Measured: `3077` // Estimated: `5477` - // Minimum execution time: 24_043_000 picoseconds. - Weight::from_parts(24_512_000, 5477) + // Minimum execution time: 21_909_000 picoseconds. + Weight::from_parts(22_641_000, 5477) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:0) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_requeued_slide() -> Weight { // Proof Size summary in bytes: - // Measured: `3011` + // Measured: `3077` // Estimated: `5477` - // Minimum execution time: 23_588_000 picoseconds. 
- Weight::from_parts(24_422_000, 5477) + // Minimum execution time: 21_626_000 picoseconds. + Weight::from_parts(22_338_000, 5477) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:0) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3015` + // Measured: `3081` // Estimated: `5477` - // Minimum execution time: 31_443_000 picoseconds. - Weight::from_parts(32_725_000, 5477) + // Minimum execution time: 29_245_000 picoseconds. + Weight::from_parts(30_147_000, 5477) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:0) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3035` + // Measured: `3101` // Estimated: `5477` - // Minimum execution time: 30_319_000 picoseconds. - Weight::from_parts(31_652_000, 5477) + // Minimum execution time: 28_512_000 picoseconds. 
+ Weight::from_parts(29_781_000, 5477) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_no_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `333` + // Measured: `399` // Estimated: `110487` - // Minimum execution time: 23_062_000 picoseconds. - Weight::from_parts(23_614_000, 110487) + // Minimum execution time: 19_208_000 picoseconds. + Weight::from_parts(19_947_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `381` + // Measured: `447` // Estimated: `110487` - // Minimum execution time: 23_537_000 picoseconds. - Weight::from_parts(24_267_000, 110487) + // Minimum execution time: 19_708_000 picoseconds. + Weight::from_parts(20_783_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) fn nudge_referendum_timed_out() -> Weight { // Proof Size summary in bytes: - // Measured: `278` + // Measured: `344` // Estimated: `3831` - // Minimum execution time: 16_388_000 picoseconds. - Weight::from_parts(16_676_000, 3831) + // Minimum execution time: 12_947_000 picoseconds. 
+ Weight::from_parts(13_582_000, 3831) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:1) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_begin_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `381` + // Measured: `447` // Estimated: `110487` - // Minimum execution time: 32_801_000 picoseconds. - Weight::from_parts(34_053_000, 110487) + // Minimum execution time: 26_699_000 picoseconds. + Weight::from_parts(28_048_000, 110487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:1) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_begin_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `381` + // Measured: `447` // Estimated: `110487` - // Minimum execution time: 35_704_000 picoseconds. - Weight::from_parts(36_451_000, 110487) + // Minimum execution time: 28_132_000 picoseconds. 
+ Weight::from_parts(29_520_000, 110487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_begin_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `434` + // Measured: `500` // Estimated: `110487` - // Minimum execution time: 29_151_000 picoseconds. - Weight::from_parts(30_055_000, 110487) + // Minimum execution time: 23_212_000 picoseconds. + Weight::from_parts(24_200_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_end_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `417` + // Measured: `483` // Estimated: `110487` - // Minimum execution time: 29_265_000 picoseconds. - Weight::from_parts(30_213_000, 110487) + // Minimum execution time: 22_807_000 picoseconds. + Weight::from_parts(23_858_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_continue_not_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `434` + // Measured: `500` // Estimated: `110487` - // Minimum execution time: 27_760_000 picoseconds. - Weight::from_parts(28_381_000, 110487) + // Minimum execution time: 22_862_000 picoseconds. 
+ Weight::from_parts(23_627_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_continue_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `438` + // Measured: `504` // Estimated: `110487` - // Minimum execution time: 25_464_000 picoseconds. - Weight::from_parts(26_348_000, 110487) + // Minimum execution time: 21_030_000 picoseconds. + Weight::from_parts(21_710_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) - /// Storage: Scheduler Lookup (r:1 w:1) - /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn nudge_referendum_approved() -> Weight { // Proof Size summary in bytes: - // Measured: `438` + // Measured: `504` // Estimated: `219984` - // Minimum execution time: 42_629_000 picoseconds. - Weight::from_parts(43_732_000, 219984) + // Minimum execution time: 33_031_000 picoseconds. + Weight::from_parts(34_518_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_rejected() -> Weight { // Proof Size summary in bytes: - // Measured: `434` + // Measured: `500` // Estimated: `110487` - // Minimum execution time: 30_015_000 picoseconds. 
- Weight::from_parts(30_827_000, 110487) + // Minimum execution time: 23_198_000 picoseconds. + Weight::from_parts(24_238_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:0) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:0) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Referenda MetadataOf (r:0 w:1) - /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:0) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Referenda::MetadataOf` (r:0 w:1) + /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn set_some_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `422` + // Measured: `555` // Estimated: `3831` - // Minimum execution time: 19_901_000 picoseconds. - Weight::from_parts(20_681_000, 3831) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Minimum execution time: 19_502_000 picoseconds. + Weight::from_parts(20_105_000, 3831) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:0) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda MetadataOf (r:1 w:1) - /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::MetadataOf` (r:1 w:1) + /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `355` + // Measured: `421` // Estimated: `3831` - // Minimum execution time: 17_323_000 picoseconds. - Weight::from_parts(18_227_000, 3831) + // Minimum execution time: 15_417_000 picoseconds. + Weight::from_parts(15_916_000, 3831) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/remark/src/weights.rs b/substrate/frame/remark/src/weights.rs index 46475db163ffd5827486f8ad562c2f3ecc6deb3d..61cca5b7d05643c200b8d9c79ccc44bde9901bf2 100644 --- a/substrate/frame/remark/src/weights.rs +++ b/substrate/frame/remark/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_remark +//! Autogenerated weights for `pallet_remark` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/remark/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/remark/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,12 +49,12 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_remark. +/// Weight functions needed for `pallet_remark`. pub trait WeightInfo { fn store(l: u32, ) -> Weight; } -/// Weights for pallet_remark using the Substrate node and recommended hardware. +/// Weights for `pallet_remark` using the Substrate node and recommended hardware. pub struct SubstrateWeight<T>(PhantomData<T>); impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { /// The range of component `l` is `[1, 1048576]`. @@ -63,23 +62,23 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_471_000 picoseconds. - Weight::from_parts(8_586_000, 0) + // Minimum execution time: 6_623_000 picoseconds. + Weight::from_parts(6_757_000, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_359, 0).saturating_mul(l.into())) + .saturating_add(Weight::from_parts(1_368, 0).saturating_mul(l.into())) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { /// The range of component `l` is `[1, 1048576]`. fn store(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_471_000 picoseconds. - Weight::from_parts(8_586_000, 0) + // Minimum execution time: 6_623_000 picoseconds. + Weight::from_parts(6_757_000, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_359, 0).saturating_mul(l.into())) + .saturating_add(Weight::from_parts(1_368, 0).saturating_mul(l.into())) } } diff --git a/substrate/frame/safe-mode/src/weights.rs b/substrate/frame/safe-mode/src/weights.rs index f72bebcab9a4db74fba3ef282692324e32858fc3..d326fab0f0f41d3d383a613cbfe9c6dab2ebd981 100644 --- a/substrate/frame/safe-mode/src/weights.rs +++ b/substrate/frame/safe-mode/src/weights.rs @@ -17,27 +17,29 @@ //! Autogenerated weights for `pallet_safe_mode` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//!
DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-aahe6cbd-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_safe_mode +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=pallet_safe_mode -// --chain=dev -// --header=./HEADER-APACHE2 -// --output=./frame/safe-mode/src/weights.rs -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/safe-mode/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -70,8 +72,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1489` - // Minimum execution time: 2_500_000 picoseconds. - Weight::from_parts(2_594_000, 1489) + // Minimum execution time: 2_216_000 picoseconds. + Weight::from_parts(2_309_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:1) @@ -80,23 +82,23 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 8_868_000 picoseconds. - Weight::from_parts(9_415_000, 1489) + // Minimum execution time: 6_124_000 picoseconds. + Weight::from_parts(6_601_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:1) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `SafeMode::Deposits` (r:0 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn enter() -> Weight { // Proof Size summary in bytes: // Measured: `142` - // Estimated: `3550` - // Minimum execution time: 50_541_000 picoseconds. - Weight::from_parts(51_558_000, 3550) + // Estimated: `3658` + // Minimum execution time: 44_825_000 picoseconds. + Weight::from_parts(45_845_000, 3658) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -106,23 +108,23 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1489` - // Minimum execution time: 10_489_000 picoseconds. - Weight::from_parts(10_833_000, 1489) + // Minimum execution time: 7_603_000 picoseconds. 
+ Weight::from_parts(7_954_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:1) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `SafeMode::Deposits` (r:0 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn extend() -> Weight { // Proof Size summary in bytes: // Measured: `169` - // Estimated: `3550` - // Minimum execution time: 50_818_000 picoseconds. - Weight::from_parts(51_873_000, 3550) + // Estimated: `3658` + // Minimum execution time: 44_716_000 picoseconds. + Weight::from_parts(46_039_000, 3658) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -132,8 +134,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 10_843_000 picoseconds. - Weight::from_parts(11_314_000, 1489) + // Minimum execution time: 8_231_000 picoseconds. + Weight::from_parts(8_731_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -143,8 +145,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 10_382_000 picoseconds. - Weight::from_parts(10_814_000, 1489) + // Minimum execution time: 8_015_000 picoseconds. + Weight::from_parts(8_247_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -153,39 +155,39 @@ impl WeightInfo for SubstrateWeight { /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn release_deposit() -> Weight { // Proof Size summary in bytes: // Measured: `292` - // Estimated: `3550` - // Minimum execution time: 42_828_000 picoseconds. - Weight::from_parts(43_752_000, 3550) + // Estimated: `3658` + // Minimum execution time: 39_149_000 picoseconds. + Weight::from_parts(40_005_000, 3658) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `SafeMode::Deposits` (r:1 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn force_release_deposit() -> Weight { // Proof Size summary in bytes: // Measured: `292` - // Estimated: `3550` - // Minimum execution time: 40_196_000 picoseconds. - Weight::from_parts(41_298_000, 3550) + // Estimated: `3658` + // Minimum execution time: 37_528_000 picoseconds. 
+ Weight::from_parts(38_473_000, 3658) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `SafeMode::Deposits` (r:1 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn force_slash_deposit() -> Weight { // Proof Size summary in bytes: // Measured: `292` - // Estimated: `3550` - // Minimum execution time: 33_660_000 picoseconds. - Weight::from_parts(34_426_000, 3550) + // Estimated: `3658` + // Minimum execution time: 29_417_000 picoseconds. + Weight::from_parts(30_195_000, 3658) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -199,8 +201,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1489` - // Minimum execution time: 2_500_000 picoseconds. - Weight::from_parts(2_594_000, 1489) + // Minimum execution time: 2_216_000 picoseconds. + Weight::from_parts(2_309_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:1) @@ -209,23 +211,23 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 8_868_000 picoseconds. - Weight::from_parts(9_415_000, 1489) + // Minimum execution time: 6_124_000 picoseconds. + Weight::from_parts(6_601_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:1) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `SafeMode::Deposits` (r:0 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn enter() -> Weight { // Proof Size summary in bytes: // Measured: `142` - // Estimated: `3550` - // Minimum execution time: 50_541_000 picoseconds. - Weight::from_parts(51_558_000, 3550) + // Estimated: `3658` + // Minimum execution time: 44_825_000 picoseconds. + Weight::from_parts(45_845_000, 3658) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -235,23 +237,23 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1489` - // Minimum execution time: 10_489_000 picoseconds. - Weight::from_parts(10_833_000, 1489) + // Minimum execution time: 7_603_000 picoseconds. 
+ Weight::from_parts(7_954_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:1) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `SafeMode::Deposits` (r:0 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn extend() -> Weight { // Proof Size summary in bytes: // Measured: `169` - // Estimated: `3550` - // Minimum execution time: 50_818_000 picoseconds. - Weight::from_parts(51_873_000, 3550) + // Estimated: `3658` + // Minimum execution time: 44_716_000 picoseconds. + Weight::from_parts(46_039_000, 3658) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -261,8 +263,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 10_843_000 picoseconds. - Weight::from_parts(11_314_000, 1489) + // Minimum execution time: 8_231_000 picoseconds. + Weight::from_parts(8_731_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -272,8 +274,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 10_382_000 picoseconds. - Weight::from_parts(10_814_000, 1489) + // Minimum execution time: 8_015_000 picoseconds. + Weight::from_parts(8_247_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -282,39 +284,39 @@ impl WeightInfo for () { /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn release_deposit() -> Weight { // Proof Size summary in bytes: // Measured: `292` - // Estimated: `3550` - // Minimum execution time: 42_828_000 picoseconds. - Weight::from_parts(43_752_000, 3550) + // Estimated: `3658` + // Minimum execution time: 39_149_000 picoseconds. + Weight::from_parts(40_005_000, 3658) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `SafeMode::Deposits` (r:1 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn force_release_deposit() -> Weight { // Proof Size summary in bytes: // Measured: `292` - // Estimated: `3550` - // Minimum execution time: 40_196_000 picoseconds. - Weight::from_parts(41_298_000, 3550) + // Estimated: `3658` + // Minimum execution time: 37_528_000 picoseconds. 
+ Weight::from_parts(38_473_000, 3658) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `SafeMode::Deposits` (r:1 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn force_slash_deposit() -> Weight { // Proof Size summary in bytes: // Measured: `292` - // Estimated: `3550` - // Minimum execution time: 33_660_000 picoseconds. - Weight::from_parts(34_426_000, 3550) + // Estimated: `3658` + // Minimum execution time: 29_417_000 picoseconds. + Weight::from_parts(30_195_000, 3658) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/substrate/frame/salary/src/weights.rs b/substrate/frame/salary/src/weights.rs index 3d3b9e315959be023cfe09ae00e993cabedea99a..c4f22a027eb42f011c98900571fc9dae2cb5d503 100644 --- a/substrate/frame/salary/src/weights.rs +++ b/substrate/frame/salary/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_salary +//! Autogenerated weights for `pallet_salary` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/salary/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/salary/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_salary. +/// Weight functions needed for `pallet_salary`. pub trait WeightInfo { fn init() -> Weight; fn bump() -> Weight; @@ -61,204 +60,204 @@ pub trait WeightInfo { fn check_payment() -> Weight; } -/// Weights for pallet_salary using the Substrate node and recommended hardware. +/// Weights for `pallet_salary` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight<T>(PhantomData<T>); impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { - /// Storage: Salary Status (r:1 w:1) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) + /// Storage: `Salary::Status` (r:1 w:1) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) fn init() -> Weight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `1541` - // Minimum execution time: 10_778_000 picoseconds. - Weight::from_parts(11_084_000, 1541) + // Minimum execution time: 7_421_000 picoseconds. + Weight::from_parts(7_819_000, 1541) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Salary Status (r:1 w:1) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) + /// Storage: `Salary::Status` (r:1 w:1) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) fn bump() -> Weight { // Proof Size summary in bytes: // Measured: `86` // Estimated: `1541` - // Minimum execution time: 12_042_000 picoseconds. - Weight::from_parts(12_645_000, 1541) + // Minimum execution time: 8_651_000 picoseconds. + Weight::from_parts(9_056_000, 1541) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Salary Status (r:1 w:0) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: Salary Claimant (r:1 w:1) - /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) + /// Storage: `Salary::Status` (r:1 w:0) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `Salary::Claimant` (r:1 w:1) + /// Proof: `Salary::Claimant` (`max_values`: None, `max_size`: Some(78), added: 2553, mode: `MaxEncodedLen`) fn induct() -> Weight { // Proof Size summary in bytes: // Measured: `395` // Estimated: `3543` - // Minimum execution time: 18_374_000 picoseconds. - Weight::from_parts(19_200_000, 3543) + // Minimum execution time: 16_513_000 picoseconds. 
+ Weight::from_parts(17_305_000, 3543) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: Salary Status (r:1 w:1) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) - /// Storage: Salary Claimant (r:1 w:1) - /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `Salary::Status` (r:1 w:1) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `Salary::Claimant` (r:1 w:1) + /// Proof: `Salary::Claimant` (`max_values`: None, `max_size`: Some(78), added: 2553, mode: `MaxEncodedLen`) fn register() -> Weight { // Proof Size summary in bytes: // Measured: `462` // Estimated: `3543` - // Minimum execution time: 22_696_000 picoseconds. - Weight::from_parts(23_275_000, 3543) + // Minimum execution time: 18_913_000 picoseconds. + Weight::from_parts(19_867_000, 3543) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Salary Status (r:1 w:1) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) - /// Storage: Salary Claimant (r:1 w:1) - /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) + /// Storage: `Salary::Status` (r:1 w:1) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `Salary::Claimant` (r:1 w:1) + /// Proof: `Salary::Claimant` (`max_values`: None, `max_size`: Some(78), added: 2553, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) fn payout() -> Weight { // Proof Size summary in bytes: // Measured: `462` // Estimated: `3543` - // Minimum execution time: 63_660_000 picoseconds. - Weight::from_parts(65_006_000, 3543) + // Minimum execution time: 53_297_000 picoseconds. 
+ Weight::from_parts(55_144_000, 3543) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Salary Status (r:1 w:1) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) - /// Storage: Salary Claimant (r:1 w:1) - /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Salary::Status` (r:1 w:1) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `Salary::Claimant` (r:1 w:1) + /// Proof: `Salary::Claimant` (`max_values`: None, `max_size`: Some(78), added: 2553, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn payout_other() -> Weight { // Proof Size summary in bytes: // Measured: `462` // Estimated: `3593` - // Minimum execution time: 64_706_000 picoseconds. - Weight::from_parts(65_763_000, 3593) + // Minimum execution time: 53_638_000 picoseconds. + Weight::from_parts(55_328_000, 3593) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Salary Status (r:1 w:1) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) - /// Storage: Salary Claimant (r:1 w:1) - /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) + /// Storage: `Salary::Status` (r:1 w:1) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `Salary::Claimant` (r:1 w:1) + /// Proof: `Salary::Claimant` (`max_values`: None, `max_size`: Some(78), added: 2553, mode: `MaxEncodedLen`) fn check_payment() -> Weight { // Proof Size summary in bytes: // Measured: `170` // Estimated: `3543` - // Minimum execution time: 11_838_000 picoseconds. - Weight::from_parts(12_323_000, 3543) + // Minimum execution time: 10_557_000 picoseconds. + Weight::from_parts(11_145_000, 3543) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Salary Status (r:1 w:1) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) + /// Storage: `Salary::Status` (r:1 w:1) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) fn init() -> Weight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `1541` - // Minimum execution time: 10_778_000 picoseconds. - Weight::from_parts(11_084_000, 1541) + // Minimum execution time: 7_421_000 picoseconds. 
+ Weight::from_parts(7_819_000, 1541) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Salary Status (r:1 w:1) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) + /// Storage: `Salary::Status` (r:1 w:1) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) fn bump() -> Weight { // Proof Size summary in bytes: // Measured: `86` // Estimated: `1541` - // Minimum execution time: 12_042_000 picoseconds. - Weight::from_parts(12_645_000, 1541) + // Minimum execution time: 8_651_000 picoseconds. + Weight::from_parts(9_056_000, 1541) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Salary Status (r:1 w:0) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: Salary Claimant (r:1 w:1) - /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) + /// Storage: `Salary::Status` (r:1 w:0) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `Salary::Claimant` (r:1 w:1) + /// Proof: `Salary::Claimant` (`max_values`: None, `max_size`: Some(78), added: 2553, mode: `MaxEncodedLen`) fn induct() -> Weight { // Proof Size summary in bytes: // Measured: `395` // Estimated: `3543` - // Minimum execution time: 18_374_000 picoseconds. - Weight::from_parts(19_200_000, 3543) + // Minimum execution time: 16_513_000 picoseconds. + Weight::from_parts(17_305_000, 3543) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: Salary Status (r:1 w:1) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) - /// Storage: Salary Claimant (r:1 w:1) - /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `Salary::Status` (r:1 w:1) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `Salary::Claimant` (r:1 w:1) + /// Proof: `Salary::Claimant` (`max_values`: None, `max_size`: Some(78), added: 2553, mode: `MaxEncodedLen`) fn register() -> Weight { // Proof Size summary in bytes: // Measured: `462` // Estimated: `3543` - // Minimum execution time: 22_696_000 picoseconds. - Weight::from_parts(23_275_000, 3543) + // Minimum execution time: 18_913_000 picoseconds. 
+ Weight::from_parts(19_867_000, 3543) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Salary Status (r:1 w:1) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) - /// Storage: Salary Claimant (r:1 w:1) - /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) + /// Storage: `Salary::Status` (r:1 w:1) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `Salary::Claimant` (r:1 w:1) + /// Proof: `Salary::Claimant` (`max_values`: None, `max_size`: Some(78), added: 2553, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) fn payout() -> Weight { // Proof Size summary in bytes: // Measured: `462` // Estimated: `3543` - // Minimum execution time: 63_660_000 picoseconds. - Weight::from_parts(65_006_000, 3543) + // Minimum execution time: 53_297_000 picoseconds. + Weight::from_parts(55_144_000, 3543) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Salary Status (r:1 w:1) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) - /// Storage: Salary Claimant (r:1 w:1) - /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Salary::Status` (r:1 w:1) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `Salary::Claimant` (r:1 w:1) + /// Proof: `Salary::Claimant` (`max_values`: None, `max_size`: Some(78), added: 2553, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn payout_other() -> Weight { // Proof Size summary in bytes: // Measured: `462` // Estimated: `3593` - // Minimum execution time: 64_706_000 picoseconds. - Weight::from_parts(65_763_000, 3593) + // Minimum execution time: 53_638_000 picoseconds. 
+ Weight::from_parts(55_328_000, 3593) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Salary Status (r:1 w:1) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) - /// Storage: Salary Claimant (r:1 w:1) - /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) + /// Storage: `Salary::Status` (r:1 w:1) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `Salary::Claimant` (r:1 w:1) + /// Proof: `Salary::Claimant` (`max_values`: None, `max_size`: Some(78), added: 2553, mode: `MaxEncodedLen`) fn check_payment() -> Weight { // Proof Size summary in bytes: // Measured: `170` // Estimated: `3543` - // Minimum execution time: 11_838_000 picoseconds. - Weight::from_parts(12_323_000, 3543) + // Minimum execution time: 10_557_000 picoseconds. + Weight::from_parts(11_145_000, 3543) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/substrate/frame/sassafras/src/mock.rs b/substrate/frame/sassafras/src/mock.rs index 5e5909fcb0d6e0d4db41ce96035fa9322cf65bbd..4c6efaa63a9ea46bdda14f0d5114184c5c42c43b 100644 --- a/substrate/frame/sassafras/src/mock.rs +++ b/substrate/frame/sassafras/src/mock.rs @@ -34,7 +34,8 @@ use sp_core::{ H256, U256, }; use sp_runtime::{ - testing::{Digest, DigestItem, Header, TestXt}, + generic::UncheckedExtrinsic, + testing::{Digest, DigestItem, Header}, BuildStorage, }; @@ -53,7 +54,7 @@ where RuntimeCall: From, { type OverarchingCall = RuntimeCall; - type Extrinsic = TestXt; + type Extrinsic = UncheckedExtrinsic; } impl pallet_sassafras::Config for Test { diff --git a/substrate/frame/scheduler/src/lib.rs b/substrate/frame/scheduler/src/lib.rs index daebebdee9956a2317ca6d755999797a52ac42ce..af3abd8ac4f2a81b363c4553f3652a2cba972212 100644 --- a/substrate/frame/scheduler/src/lib.rs +++ b/substrate/frame/scheduler/src/lib.rs @@ -229,7 +229,7 @@ pub mod pallet { use frame_support::{dispatch::PostDispatchInfo, pallet_prelude::*}; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); #[pallet::pallet] diff --git a/substrate/frame/scheduler/src/weights.rs b/substrate/frame/scheduler/src/weights.rs index 9b7e5405a1b5bdd3945f0935801f79843cd0d78a..6c98145d266fdd7603cbd625ef55968013b3cd32 100644 --- a/substrate/frame/scheduler/src/weights.rs +++ b/substrate/frame/scheduler/src/weights.rs @@ -17,26 +17,28 @@ //! Autogenerated weights for `pallet_scheduler` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-grjcggob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_scheduler +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_scheduler -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/scheduler/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -77,8 +79,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `31` // Estimated: `1489` - // Minimum execution time: 3_040_000 picoseconds. - Weight::from_parts(3_202_000, 1489) + // Minimum execution time: 3_128_000 picoseconds. + Weight::from_parts(3_372_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -89,10 +91,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `81 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 3_462_000 picoseconds. - Weight::from_parts(6_262_125, 110487) - // Standard Error: 536 - .saturating_add(Weight::from_parts(332_570, 0).saturating_mul(s.into())) + // Minimum execution time: 3_560_000 picoseconds. + Weight::from_parts(6_356_795, 110487) + // Standard Error: 493 + .saturating_add(Weight::from_parts(315_098, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -100,8 +102,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_425_000 picoseconds. - Weight::from_parts(3_680_000, 0) + // Minimum execution time: 3_501_000 picoseconds. + Weight::from_parts(3_722_000, 0) } /// Storage: `Preimage::PreimageFor` (r:1 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) @@ -114,10 +116,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `246 + s * (1 ±0)` // Estimated: `3711 + s * (1 ±0)` - // Minimum execution time: 17_564_000 picoseconds. - Weight::from_parts(17_887_000, 3711) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_253, 0).saturating_mul(s.into())) + // Minimum execution time: 17_976_000 picoseconds. + Weight::from_parts(18_137_000, 3711) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_173, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into())) @@ -128,16 +130,16 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_934_000 picoseconds. - Weight::from_parts(5_275_000, 0) + // Minimum execution time: 4_935_000 picoseconds. + Weight::from_parts(5_133_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } fn service_task_periodic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_348_000 picoseconds. - Weight::from_parts(3_561_000, 0) + // Minimum execution time: 3_467_000 picoseconds. 
+ Weight::from_parts(3_654_000, 0) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -147,16 +149,16 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3997` - // Minimum execution time: 6_395_000 picoseconds. - Weight::from_parts(6_642_000, 3997) + // Minimum execution time: 6_528_000 picoseconds. + Weight::from_parts(6_820_000, 3997) .saturating_add(T::DbWeight::get().reads(2_u64)) } fn execute_dispatch_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_167_000 picoseconds. - Weight::from_parts(2_266_000, 0) + // Minimum execution time: 2_202_000 picoseconds. + Weight::from_parts(2_360_000, 0) } /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) @@ -165,15 +167,17 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `81 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 10_009_000 picoseconds. - Weight::from_parts(13_565_985, 110487) - // Standard Error: 575 - .saturating_add(Weight::from_parts(354_760, 0).saturating_mul(s.into())) + // Minimum execution time: 10_222_000 picoseconds. + Weight::from_parts(13_654_958, 110487) + // Standard Error: 676 + .saturating_add(Weight::from_parts(338_633, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Lookup` (r:0 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 512]`. @@ -181,12 +185,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `81 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 14_048_000 picoseconds. - Weight::from_parts(15_141_696, 110487) - // Standard Error: 1_082 - .saturating_add(Weight::from_parts(533_390, 0).saturating_mul(s.into())) + // Minimum execution time: 15_517_000 picoseconds. + Weight::from_parts(17_464_075, 110487) + // Standard Error: 952 + .saturating_add(Weight::from_parts(495_806, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Scheduler::Lookup` (r:1 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) @@ -197,10 +201,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `596 + s * (178 ±0)` // Estimated: `110487` - // Minimum execution time: 12_902_000 picoseconds. - Weight::from_parts(18_957_156, 110487) - // Standard Error: 792 - .saturating_add(Weight::from_parts(361_909, 0).saturating_mul(s.into())) + // Minimum execution time: 13_091_000 picoseconds. 
+ Weight::from_parts(19_101_313, 110487) + // Standard Error: 662 + .saturating_add(Weight::from_parts(342_468, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -208,35 +212,35 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 512]`. fn cancel_named(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `709 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 15_933_000 picoseconds. - Weight::from_parts(18_091_415, 110487) - // Standard Error: 779 - .saturating_add(Weight::from_parts(534_402, 0).saturating_mul(s.into())) + // Minimum execution time: 17_579_000 picoseconds. + Weight::from_parts(20_561_921, 110487) + // Standard Error: 792 + .saturating_add(Weight::from_parts(500_463, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: `Scheduler::Retries` (r:1 w:2) - /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Lookup` (r:0 w:1) - /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 512]`. fn schedule_retry(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `159` + // Measured: `118` // Estimated: `110487` - // Minimum execution time: 14_155_000 picoseconds. - Weight::from_parts(16_447_031, 110487) - // Standard Error: 233 - .saturating_add(Weight::from_parts(8_424, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + // Minimum execution time: 8_996_000 picoseconds. + Weight::from_parts(11_393_234, 110487) + // Standard Error: 190 + .saturating_add(Weight::from_parts(6_714, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Scheduler::Agenda` (r:1 w:0) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) @@ -244,10 +248,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn set_retry() -> Weight { // Proof Size summary in bytes: - // Measured: `81 + s * (177 ±0)` + // Measured: `90705` // Estimated: `110487` - // Minimum execution time: 8_130_000 picoseconds. - Weight::from_parts(9_047_554, 110487) + // Minimum execution time: 121_505_000 picoseconds. 
+ Weight::from_parts(124_306_000, 110487) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -259,10 +263,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn set_retry_named() -> Weight { // Proof Size summary in bytes: - // Measured: `647 + s * (178 ±0)` + // Measured: `91747` // Estimated: `110487` - // Minimum execution time: 10_838_000 picoseconds. - Weight::from_parts(12_804_076, 110487) + // Minimum execution time: 128_070_000 picoseconds. + Weight::from_parts(132_683_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -272,10 +276,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn cancel_retry() -> Weight { // Proof Size summary in bytes: - // Measured: `81 + s * (177 ±0)` + // Measured: `90717` // Estimated: `110487` - // Minimum execution time: 8_130_000 picoseconds. - Weight::from_parts(9_047_554, 110487) + // Minimum execution time: 118_260_000 picoseconds. + Weight::from_parts(119_722_000, 110487) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -287,10 +291,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn cancel_retry_named() -> Weight { // Proof Size summary in bytes: - // Measured: `647 + s * (178 ±0)` + // Measured: `91759` // Estimated: `110487` - // Minimum execution time: 10_838_000 picoseconds. - Weight::from_parts(12_804_076, 110487) + // Minimum execution time: 129_036_000 picoseconds. + Weight::from_parts(133_975_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -304,8 +308,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `31` // Estimated: `1489` - // Minimum execution time: 3_040_000 picoseconds. - Weight::from_parts(3_202_000, 1489) + // Minimum execution time: 3_128_000 picoseconds. + Weight::from_parts(3_372_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -316,10 +320,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `81 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 3_462_000 picoseconds. - Weight::from_parts(6_262_125, 110487) - // Standard Error: 536 - .saturating_add(Weight::from_parts(332_570, 0).saturating_mul(s.into())) + // Minimum execution time: 3_560_000 picoseconds. + Weight::from_parts(6_356_795, 110487) + // Standard Error: 493 + .saturating_add(Weight::from_parts(315_098, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -327,8 +331,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_425_000 picoseconds. - Weight::from_parts(3_680_000, 0) + // Minimum execution time: 3_501_000 picoseconds. 
+ Weight::from_parts(3_722_000, 0) } /// Storage: `Preimage::PreimageFor` (r:1 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) @@ -341,10 +345,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `246 + s * (1 ±0)` // Estimated: `3711 + s * (1 ±0)` - // Minimum execution time: 17_564_000 picoseconds. - Weight::from_parts(17_887_000, 3711) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_253, 0).saturating_mul(s.into())) + // Minimum execution time: 17_976_000 picoseconds. + Weight::from_parts(18_137_000, 3711) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_173, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into())) @@ -355,16 +359,16 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_934_000 picoseconds. - Weight::from_parts(5_275_000, 0) + // Minimum execution time: 4_935_000 picoseconds. + Weight::from_parts(5_133_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } fn service_task_periodic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_348_000 picoseconds. - Weight::from_parts(3_561_000, 0) + // Minimum execution time: 3_467_000 picoseconds. + Weight::from_parts(3_654_000, 0) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -374,16 +378,16 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3997` - // Minimum execution time: 6_395_000 picoseconds. - Weight::from_parts(6_642_000, 3997) + // Minimum execution time: 6_528_000 picoseconds. + Weight::from_parts(6_820_000, 3997) .saturating_add(RocksDbWeight::get().reads(2_u64)) } fn execute_dispatch_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_167_000 picoseconds. - Weight::from_parts(2_266_000, 0) + // Minimum execution time: 2_202_000 picoseconds. + Weight::from_parts(2_360_000, 0) } /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) @@ -392,15 +396,17 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `81 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 10_009_000 picoseconds. - Weight::from_parts(13_565_985, 110487) - // Standard Error: 575 - .saturating_add(Weight::from_parts(354_760, 0).saturating_mul(s.into())) + // Minimum execution time: 10_222_000 picoseconds. 
+ Weight::from_parts(13_654_958, 110487) + // Standard Error: 676 + .saturating_add(Weight::from_parts(338_633, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Lookup` (r:0 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 512]`. @@ -408,12 +414,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `81 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 14_048_000 picoseconds. - Weight::from_parts(15_141_696, 110487) - // Standard Error: 1_082 - .saturating_add(Weight::from_parts(533_390, 0).saturating_mul(s.into())) + // Minimum execution time: 15_517_000 picoseconds. + Weight::from_parts(17_464_075, 110487) + // Standard Error: 952 + .saturating_add(Weight::from_parts(495_806, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Scheduler::Lookup` (r:1 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) @@ -424,10 +430,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `596 + s * (178 ±0)` // Estimated: `110487` - // Minimum execution time: 12_902_000 picoseconds. - Weight::from_parts(18_957_156, 110487) - // Standard Error: 792 - .saturating_add(Weight::from_parts(361_909, 0).saturating_mul(s.into())) + // Minimum execution time: 13_091_000 picoseconds. + Weight::from_parts(19_101_313, 110487) + // Standard Error: 662 + .saturating_add(Weight::from_parts(342_468, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -435,35 +441,35 @@ impl WeightInfo for () { /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 512]`. fn cancel_named(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `709 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 15_933_000 picoseconds. - Weight::from_parts(18_091_415, 110487) - // Standard Error: 779 - .saturating_add(Weight::from_parts(534_402, 0).saturating_mul(s.into())) + // Minimum execution time: 17_579_000 picoseconds. 
+ Weight::from_parts(20_561_921, 110487) + // Standard Error: 792 + .saturating_add(Weight::from_parts(500_463, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: `Scheduler::Retries` (r:1 w:2) - /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Lookup` (r:0 w:1) - /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 512]`. fn schedule_retry(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `159` + // Measured: `118` // Estimated: `110487` - // Minimum execution time: 14_155_000 picoseconds. - Weight::from_parts(16_447_031, 110487) - // Standard Error: 233 - .saturating_add(Weight::from_parts(8_424, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + // Minimum execution time: 8_996_000 picoseconds. + Weight::from_parts(11_393_234, 110487) + // Standard Error: 190 + .saturating_add(Weight::from_parts(6_714, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Scheduler::Agenda` (r:1 w:0) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) @@ -471,10 +477,10 @@ impl WeightInfo for () { /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn set_retry() -> Weight { // Proof Size summary in bytes: - // Measured: `81 + s * (177 ±0)` + // Measured: `90705` // Estimated: `110487` - // Minimum execution time: 8_130_000 picoseconds. - Weight::from_parts(9_047_554, 110487) + // Minimum execution time: 121_505_000 picoseconds. + Weight::from_parts(124_306_000, 110487) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -486,10 +492,10 @@ impl WeightInfo for () { /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn set_retry_named() -> Weight { // Proof Size summary in bytes: - // Measured: `647 + s * (178 ±0)` + // Measured: `91747` // Estimated: `110487` - // Minimum execution time: 10_838_000 picoseconds. - Weight::from_parts(12_804_076, 110487) + // Minimum execution time: 128_070_000 picoseconds. + Weight::from_parts(132_683_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -499,10 +505,10 @@ impl WeightInfo for () { /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn cancel_retry() -> Weight { // Proof Size summary in bytes: - // Measured: `81 + s * (177 ±0)` + // Measured: `90717` // Estimated: `110487` - // Minimum execution time: 8_130_000 picoseconds. - Weight::from_parts(9_047_554, 110487) + // Minimum execution time: 118_260_000 picoseconds. 
+ Weight::from_parts(119_722_000, 110487) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -514,10 +520,10 @@ impl WeightInfo for () { /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn cancel_retry_named() -> Weight { // Proof Size summary in bytes: - // Measured: `647 + s * (178 ±0)` + // Measured: `91759` // Estimated: `110487` - // Minimum execution time: 10_838_000 picoseconds. - Weight::from_parts(12_804_076, 110487) + // Minimum execution time: 129_036_000 picoseconds. + Weight::from_parts(133_975_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/session/src/historical/mod.rs b/substrate/frame/session/src/historical/mod.rs index d74e9dd0b7c54042840c62feff0693359f97c6c9..b9cecea1a7f7144fb7f846548b827455dce0a1e5 100644 --- a/substrate/frame/session/src/historical/mod.rs +++ b/substrate/frame/session/src/historical/mod.rs @@ -58,7 +58,7 @@ pub mod pallet { use super::*; use frame_support::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/substrate/frame/session/src/lib.rs b/substrate/frame/session/src/lib.rs index fc35cd6ddd82a64ad625a3bcf3cc9e6264e0a9e3..7d8128dbc1fe64c647994f6da901d4b3bd9e008f 100644 --- a/substrate/frame/session/src/lib.rs +++ b/substrate/frame/session/src/lib.rs @@ -368,7 +368,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(0); #[pallet::pallet] diff --git a/substrate/frame/session/src/weights.rs b/substrate/frame/session/src/weights.rs index dd9848fd2c177913f9121f98ab269bdae7e0f40e..09eb665ff3d815b2fe7ebc1fb363c97364f93634 100644 --- a/substrate/frame/session/src/weights.rs +++ b/substrate/frame/session/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_session +//! Autogenerated weights for `pallet_session` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/session/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/session/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,77 +49,77 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_session. +/// Weight functions needed for `pallet_session`. pub trait WeightInfo { fn set_keys() -> Weight; fn purge_keys() -> Weight; } -/// Weights for pallet_session using the Substrate node and recommended hardware. +/// Weights for `pallet_session` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Session NextKeys (r:1 w:1) - /// Proof Skipped: Session NextKeys (max_values: None, max_size: None, mode: Measured) - /// Storage: Session KeyOwner (r:4 w:4) - /// Proof Skipped: Session KeyOwner (max_values: None, max_size: None, mode: Measured) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Session::NextKeys` (r:1 w:1) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::KeyOwner` (r:6 w:6) + /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_keys() -> Weight { // Proof Size summary in bytes: - // Measured: `1924` - // Estimated: `12814` - // Minimum execution time: 55_459_000 picoseconds. - Weight::from_parts(56_180_000, 12814) - .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().writes(5_u64)) + // Measured: `1919` + // Estimated: `17759` + // Minimum execution time: 57_921_000 picoseconds. 
+ Weight::from_parts(58_960_000, 17759) + .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(T::DbWeight::get().writes(7_u64)) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Session NextKeys (r:1 w:1) - /// Proof Skipped: Session NextKeys (max_values: None, max_size: None, mode: Measured) - /// Storage: Session KeyOwner (r:0 w:4) - /// Proof Skipped: Session KeyOwner (max_values: None, max_size: None, mode: Measured) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Session::NextKeys` (r:1 w:1) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::KeyOwner` (r:0 w:6) + /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) fn purge_keys() -> Weight { // Proof Size summary in bytes: - // Measured: `1791` - // Estimated: `5256` - // Minimum execution time: 40_194_000 picoseconds. - Weight::from_parts(41_313_000, 5256) + // Measured: `1817` + // Estimated: `5282` + // Minimum execution time: 40_983_000 picoseconds. + Weight::from_parts(42_700_000, 5282) .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(5_u64)) + .saturating_add(T::DbWeight::get().writes(7_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Session NextKeys (r:1 w:1) - /// Proof Skipped: Session NextKeys (max_values: None, max_size: None, mode: Measured) - /// Storage: Session KeyOwner (r:4 w:4) - /// Proof Skipped: Session KeyOwner (max_values: None, max_size: None, mode: Measured) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Session::NextKeys` (r:1 w:1) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::KeyOwner` (r:6 w:6) + /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_keys() -> Weight { // Proof Size summary in bytes: - // Measured: `1924` - // Estimated: `12814` - // Minimum execution time: 55_459_000 picoseconds. - Weight::from_parts(56_180_000, 12814) - .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().writes(5_u64)) + // Measured: `1919` + // Estimated: `17759` + // Minimum execution time: 57_921_000 picoseconds. 
+ Weight::from_parts(58_960_000, 17759) + .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(RocksDbWeight::get().writes(7_u64)) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Session NextKeys (r:1 w:1) - /// Proof Skipped: Session NextKeys (max_values: None, max_size: None, mode: Measured) - /// Storage: Session KeyOwner (r:0 w:4) - /// Proof Skipped: Session KeyOwner (max_values: None, max_size: None, mode: Measured) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Session::NextKeys` (r:1 w:1) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::KeyOwner` (r:0 w:6) + /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) fn purge_keys() -> Weight { // Proof Size summary in bytes: - // Measured: `1791` - // Estimated: `5256` - // Minimum execution time: 40_194_000 picoseconds. - Weight::from_parts(41_313_000, 5256) + // Measured: `1817` + // Estimated: `5282` + // Minimum execution time: 40_983_000 picoseconds. + Weight::from_parts(42_700_000, 5282) .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(5_u64)) + .saturating_add(RocksDbWeight::get().writes(7_u64)) } } diff --git a/substrate/frame/society/src/migrations.rs b/substrate/frame/society/src/migrations.rs index dafb1e0b9e5e99706de94b1a4fdd1808343af29d..6a1029114519db8c41c56301197d04de26bfb6c8 100644 --- a/substrate/frame/society/src/migrations.rs +++ b/substrate/frame/society/src/migrations.rs @@ -40,11 +40,11 @@ impl< { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, TryRuntimeError> { - let current = Pallet::::current_storage_version(); - let onchain = Pallet::::on_chain_storage_version(); - ensure!(onchain == 0 && current == 2, "pallet_society: invalid version"); + let in_code = Pallet::::in_code_storage_version(); + let on_chain = Pallet::::on_chain_storage_version(); + ensure!(on_chain == 0 && in_code == 2, "pallet_society: invalid version"); - Ok((old::Candidates::::get(), old::Members::::get()).encode()) + Ok((v0::Candidates::::get(), v0::Members::::get()).encode()) } fn on_runtime_upgrade() -> Weight { @@ -103,7 +103,7 @@ pub type MigrateToV2 = frame_support::migrations::VersionedMi ::DbWeight, >; -pub(crate) mod old { +pub(crate) mod v0 { use super::*; use frame_support::storage_alias; @@ -230,37 +230,37 @@ pub fn assert_internal_consistency, I: Instance + 'static>() { } // We don't use these - make sure they don't exist. - assert_eq!(old::SuspendedCandidates::::iter().count(), 0); - assert_eq!(old::Strikes::::iter().count(), 0); - assert_eq!(old::Vouching::::iter().count(), 0); - assert!(!old::Defender::::exists()); - assert!(!old::Members::::exists()); + assert_eq!(v0::SuspendedCandidates::::iter().count(), 0); + assert_eq!(v0::Strikes::::iter().count(), 0); + assert_eq!(v0::Vouching::::iter().count(), 0); + assert!(!v0::Defender::::exists()); + assert!(!v0::Members::::exists()); } pub fn from_original, I: Instance + 'static>( past_payouts: &mut [(::AccountId, BalanceOf)], ) -> Result { // Migrate Bids from old::Bids (just a trunctation). - Bids::::put(BoundedVec::<_, T::MaxBids>::truncate_from(old::Bids::::take())); + Bids::::put(BoundedVec::<_, T::MaxBids>::truncate_from(v0::Bids::::take())); // Initialise round counter. 
RoundCount::::put(0); // Migrate Candidates from old::Candidates - for Bid { who: candidate, kind, value } in old::Candidates::::take().into_iter() { + for Bid { who: candidate, kind, value } in v0::Candidates::::take().into_iter() { let mut tally = Tally::default(); // Migrate Votes from old::Votes // No need to drain, since we're overwriting values. - for (voter, vote) in old::Votes::::iter_prefix(&candidate) { + for (voter, vote) in v0::Votes::::iter_prefix(&candidate) { Votes::::insert( &candidate, &voter, - Vote { approve: vote == old::Vote::Approve, weight: 1 }, + Vote { approve: vote == v0::Vote::Approve, weight: 1 }, ); match vote { - old::Vote::Approve => tally.approvals.saturating_inc(), - old::Vote::Reject => tally.rejections.saturating_inc(), - old::Vote::Skeptic => Skeptic::::put(&voter), + v0::Vote::Approve => tally.approvals.saturating_inc(), + v0::Vote::Reject => tally.rejections.saturating_inc(), + v0::Vote::Skeptic => Skeptic::::put(&voter), } } Candidates::::insert( @@ -271,9 +271,9 @@ pub fn from_original, I: Instance + 'static>( // Migrate Members from old::Members old::Strikes old::Vouching let mut member_count = 0; - for member in old::Members::::take() { - let strikes = old::Strikes::::take(&member); - let vouching = old::Vouching::::take(&member); + for member in v0::Members::::take() { + let strikes = v0::Strikes::::take(&member); + let vouching = v0::Vouching::::take(&member); let record = MemberRecord { index: member_count, rank: 0, strikes, vouching }; Members::::insert(&member, record); MemberByIndex::::insert(member_count, &member); @@ -314,7 +314,7 @@ pub fn from_original, I: Instance + 'static>( // Migrate Payouts from: old::Payouts and raw info (needed since we can't query old chain // state). past_payouts.sort(); - for (who, mut payouts) in old::Payouts::::iter() { + for (who, mut payouts) in v0::Payouts::::iter() { payouts.truncate(T::MaxPayouts::get() as usize); // ^^ Safe since we already truncated. let paid = past_payouts @@ -329,19 +329,19 @@ pub fn from_original, I: Instance + 'static>( } // Migrate SuspendedMembers from old::SuspendedMembers old::Strikes old::Vouching. - for who in old::SuspendedMembers::::iter_keys() { - let strikes = old::Strikes::::take(&who); - let vouching = old::Vouching::::take(&who); + for who in v0::SuspendedMembers::::iter_keys() { + let strikes = v0::Strikes::::take(&who); + let vouching = v0::Vouching::::take(&who); let record = MemberRecord { index: 0, rank: 0, strikes, vouching }; SuspendedMembers::::insert(&who, record); } // Any suspended candidates remaining are rejected. - let _ = old::SuspendedCandidates::::clear(u32::MAX, None); + let _ = v0::SuspendedCandidates::::clear(u32::MAX, None); // We give the current defender the benefit of the doubt. - old::Defender::::kill(); - let _ = old::DefenderVotes::::clear(u32::MAX, None); + v0::Defender::::kill(); + let _ = v0::DefenderVotes::::clear(u32::MAX, None); Ok(T::BlockWeights::get().max_block) } diff --git a/substrate/frame/society/src/tests.rs b/substrate/frame/society/src/tests.rs index 940643168fb41fd723905497935b7cb543b7f7b7..411567e1dedfde46450c0e778bcb14bb00883962 100644 --- a/substrate/frame/society/src/tests.rs +++ b/substrate/frame/society/src/tests.rs @@ -18,7 +18,7 @@ //! Tests for the module. 
use super::*; -use migrations::old; +use migrations::v0; use mock::*; use frame_support::{assert_noop, assert_ok}; @@ -32,41 +32,41 @@ use RuntimeOrigin as Origin; #[test] fn migration_works() { EnvBuilder::new().founded(false).execute(|| { - use old::Vote::*; + use v0::Vote::*; // Initialise the old storage items. Founder::::put(10); Head::::put(30); - old::Members::::put(vec![10, 20, 30]); - old::Vouching::::insert(30, Vouching); - old::Vouching::::insert(40, Banned); - old::Strikes::::insert(20, 1); - old::Strikes::::insert(30, 2); - old::Strikes::::insert(40, 5); - old::Payouts::::insert(20, vec![(1, 1)]); - old::Payouts::::insert( + v0::Members::::put(vec![10, 20, 30]); + v0::Vouching::::insert(30, Vouching); + v0::Vouching::::insert(40, Banned); + v0::Strikes::::insert(20, 1); + v0::Strikes::::insert(30, 2); + v0::Strikes::::insert(40, 5); + v0::Payouts::::insert(20, vec![(1, 1)]); + v0::Payouts::::insert( 30, (0..=::MaxPayouts::get()) .map(|i| (i as u64, i as u64)) .collect::>(), ); - old::SuspendedMembers::::insert(40, true); + v0::SuspendedMembers::::insert(40, true); - old::Defender::::put(20); - old::DefenderVotes::::insert(10, Approve); - old::DefenderVotes::::insert(20, Approve); - old::DefenderVotes::::insert(30, Reject); + v0::Defender::::put(20); + v0::DefenderVotes::::insert(10, Approve); + v0::DefenderVotes::::insert(20, Approve); + v0::DefenderVotes::::insert(30, Reject); - old::SuspendedCandidates::::insert(50, (10, Deposit(100))); + v0::SuspendedCandidates::::insert(50, (10, Deposit(100))); - old::Candidates::::put(vec![ + v0::Candidates::::put(vec![ Bid { who: 60, kind: Deposit(100), value: 200 }, Bid { who: 70, kind: Vouch(30, 30), value: 100 }, ]); - old::Votes::::insert(60, 10, Approve); - old::Votes::::insert(70, 10, Reject); - old::Votes::::insert(70, 20, Approve); - old::Votes::::insert(70, 30, Approve); + v0::Votes::::insert(60, 10, Approve); + v0::Votes::::insert(70, 10, Reject); + v0::Votes::::insert(70, 20, Approve); + v0::Votes::::insert(70, 30, Approve); let bids = (0..=::MaxBids::get()) .map(|i| Bid { @@ -75,7 +75,7 @@ fn migration_works() { value: 10u64 + i as u64, }) .collect::>(); - old::Bids::::put(bids); + v0::Bids::::put(bids); migrations::from_original::(&mut [][..]).expect("migration failed"); migrations::assert_internal_consistency::(); diff --git a/substrate/frame/society/src/weights.rs b/substrate/frame/society/src/weights.rs index c32c2383ac99351ae53b97bdcbea8cd5dfad7744..1245f76297216aea9bcc4c0c4b674f66d02a10c2 100644 --- a/substrate/frame/society/src/weights.rs +++ b/substrate/frame/society/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -15,35 +15,41 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_society +//! Autogenerated weights for `pallet_society` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-09-13, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! 
DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev // --steps=50 // --repeat=20 // --pallet=pallet_society +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs -// --header=./HEADER-APACHE2 -// --output=./frame/society/src/weights.rs +// --heap-pages=4096 +// --output=./substrate/frame/society/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_society. +/// Weight functions needed for `pallet_society`. pub trait WeightInfo { fn bid() -> Weight; fn unbid() -> Weight; @@ -67,309 +73,739 @@ pub trait WeightInfo { fn cleanup_challenge() -> Weight; } -/// Weights for pallet_society using the Substrate node and recommended hardware. +/// Weights for `pallet_society` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - // Storage: Society Bids (r:1 w:1) - // Storage: Society Candidates (r:1 w:0) - // Storage: Society Members (r:1 w:0) - // Storage: Society SuspendedMembers (r:1 w:0) - // Storage: Society Parameters (r:1 w:0) + /// Storage: `Society::Bids` (r:1 w:1) + /// Proof: `Society::Bids` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Candidates` (r:1 w:0) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:1 w:0) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::SuspendedMembers` (r:1 w:0) + /// Proof: `Society::SuspendedMembers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:1 w:0) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn bid() -> Weight { - Weight::zero() - } - // Storage: Society Bids (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `444` + // Estimated: `3909` + // Minimum execution time: 29_905_000 picoseconds. + Weight::from_parts(31_031_000, 3909) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Society::Bids` (r:1 w:1) + /// Proof: `Society::Bids` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn unbid() -> Weight { - Weight::zero() - } - // Storage: Society Bids (r:1 w:1) - // Storage: Society Candidates (r:1 w:0) - // Storage: Society Members (r:2 w:1) - // Storage: Society SuspendedMembers (r:1 w:0) + // Proof Size summary in bytes: + // Measured: `461` + // Estimated: `1946` + // Minimum execution time: 23_038_000 picoseconds. 
+ Weight::from_parts(23_904_000, 1946) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Society::Bids` (r:1 w:1) + /// Proof: `Society::Bids` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Candidates` (r:1 w:0) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:2 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::SuspendedMembers` (r:1 w:0) + /// Proof: `Society::SuspendedMembers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn vouch() -> Weight { - Weight::zero() - } - // Storage: Society Bids (r:1 w:1) - // Storage: Society Members (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `481` + // Estimated: `6421` + // Minimum execution time: 21_197_000 picoseconds. + Weight::from_parts(22_043_000, 6421) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Bids` (r:1 w:1) + /// Proof: `Society::Bids` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:1 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) fn unvouch() -> Weight { - Weight::zero() - } - // Storage: Society Candidates (r:1 w:1) - // Storage: Society Members (r:1 w:0) - // Storage: Society Votes (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `535` + // Estimated: `4000` + // Minimum execution time: 14_402_000 picoseconds. + Weight::from_parts(15_171_000, 4000) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:1 w:0) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Votes` (r:1 w:1) + /// Proof: `Society::Votes` (`max_values`: None, `max_size`: None, mode: `Measured`) fn vote() -> Weight { - Weight::zero() - } - // Storage: Society Defending (r:1 w:1) - // Storage: Society Members (r:1 w:0) - // Storage: Society ChallengeRoundCount (r:1 w:0) - // Storage: Society DefenderVotes (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `569` + // Estimated: `4034` + // Minimum execution time: 21_930_000 picoseconds. 
+ Weight::from_parts(22_666_000, 4034) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Defending` (r:1 w:1) + /// Proof: `Society::Defending` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:1 w:0) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::ChallengeRoundCount` (r:1 w:0) + /// Proof: `Society::ChallengeRoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::DefenderVotes` (r:1 w:1) + /// Proof: `Society::DefenderVotes` (`max_values`: None, `max_size`: None, mode: `Measured`) fn defender_vote() -> Weight { - Weight::zero() - } - // Storage: Society Members (r:1 w:0) - // Storage: Society Payouts (r:1 w:1) - // Storage: System Account (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `561` + // Estimated: `4026` + // Minimum execution time: 18_821_000 picoseconds. + Weight::from_parts(19_633_000, 4026) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Members` (r:1 w:0) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Payouts` (r:1 w:1) + /// Proof: `Society::Payouts` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn payout() -> Weight { - Weight::zero() - } - // Storage: Society Members (r:1 w:1) - // Storage: Society Payouts (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `650` + // Estimated: `4115` + // Minimum execution time: 47_262_000 picoseconds. + Weight::from_parts(48_313_000, 4115) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Members` (r:1 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Payouts` (r:1 w:1) + /// Proof: `Society::Payouts` (`max_values`: None, `max_size`: None, mode: `Measured`) fn waive_repay() -> Weight { - Weight::zero() - } - // Storage: Society Head (r:1 w:1) - // Storage: Society MemberCount (r:1 w:1) - // Storage: Society MemberByIndex (r:0 w:1) - // Storage: Society Founder (r:0 w:1) - // Storage: Society Rules (r:0 w:1) - // Storage: Society Members (r:0 w:1) - // Storage: Society Parameters (r:0 w:1) + // Proof Size summary in bytes: + // Measured: `547` + // Estimated: `4012` + // Minimum execution time: 18_189_000 picoseconds. 
+ Weight::from_parts(19_038_000, 4012) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Head` (r:1 w:1) + /// Proof: `Society::Head` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberCount` (r:1 w:1) + /// Proof: `Society::MemberCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberByIndex` (r:0 w:1) + /// Proof: `Society::MemberByIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Founder` (r:0 w:1) + /// Proof: `Society::Founder` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Rules` (r:0 w:1) + /// Proof: `Society::Rules` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:0 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:0 w:1) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn found_society() -> Weight { - Weight::zero() - } - // Storage: Society Founder (r:1 w:1) - // Storage: Society MemberCount (r:1 w:1) - // Storage: Society Head (r:0 w:1) - // Storage: Society Defending (r:0 w:1) - // Storage: Society ChallengeRoundCount (r:0 w:1) - // Storage: Society MemberByIndex (r:0 w:5) - // Storage: Society Skeptic (r:0 w:1) - // Storage: Society Candidates (r:0 w:4) - // Storage: Society Pot (r:0 w:1) - // Storage: Society Rules (r:0 w:1) - // Storage: Society Votes (r:0 w:4) - // Storage: Society Members (r:0 w:5) - // Storage: Society RoundCount (r:0 w:1) - // Storage: Society Bids (r:0 w:1) - // Storage: Society Parameters (r:0 w:1) - // Storage: Society NextHead (r:0 w:1) + // Proof Size summary in bytes: + // Measured: `180` + // Estimated: `1665` + // Minimum execution time: 14_815_000 picoseconds. 
+ Weight::from_parts(15_426_000, 1665) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(7_u64)) + } + /// Storage: `Society::Founder` (r:1 w:1) + /// Proof: `Society::Founder` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberCount` (r:1 w:1) + /// Proof: `Society::MemberCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:5 w:5) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberByIndex` (r:5 w:5) + /// Proof: `Society::MemberByIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Votes` (r:4 w:4) + /// Proof: `Society::Votes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Candidates` (r:4 w:4) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Head` (r:0 w:1) + /// Proof: `Society::Head` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Defending` (r:0 w:1) + /// Proof: `Society::Defending` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::ChallengeRoundCount` (r:0 w:1) + /// Proof: `Society::ChallengeRoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Skeptic` (r:0 w:1) + /// Proof: `Society::Skeptic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Pot` (r:0 w:1) + /// Proof: `Society::Pot` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Rules` (r:0 w:1) + /// Proof: `Society::Rules` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:0 w:1) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Bids` (r:0 w:1) + /// Proof: `Society::Bids` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:0 w:1) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::NextHead` (r:0 w:1) + /// Proof: `Society::NextHead` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn dissolve() -> Weight { - Weight::zero() - } - // Storage: Society Founder (r:1 w:0) - // Storage: Society SuspendedMembers (r:1 w:1) - // Storage: Society Payouts (r:1 w:0) - // Storage: Society Pot (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `1654` + // Estimated: `15019` + // Minimum execution time: 57_787_000 picoseconds. 
+ Weight::from_parts(59_489_000, 15019) + .saturating_add(T::DbWeight::get().reads(20_u64)) + .saturating_add(T::DbWeight::get().writes(30_u64)) + } + /// Storage: `Society::Founder` (r:1 w:0) + /// Proof: `Society::Founder` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::SuspendedMembers` (r:1 w:1) + /// Proof: `Society::SuspendedMembers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Payouts` (r:1 w:0) + /// Proof: `Society::Payouts` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Pot` (r:1 w:1) + /// Proof: `Society::Pot` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn judge_suspended_member() -> Weight { - Weight::zero() - } - // Storage: Society Founder (r:1 w:0) - // Storage: Society MemberCount (r:1 w:0) - // Storage: Society Parameters (r:0 w:1) + // Proof Size summary in bytes: + // Measured: `505` + // Estimated: `3970` + // Minimum execution time: 19_262_000 picoseconds. + Weight::from_parts(19_752_000, 3970) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Founder` (r:1 w:0) + /// Proof: `Society::Founder` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberCount` (r:1 w:0) + /// Proof: `Society::MemberCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:0 w:1) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_parameters() -> Weight { - Weight::zero() - } - // Storage: Society Candidates (r:1 w:1) - // Storage: Society RoundCount (r:1 w:0) - // Storage: Society Skeptic (r:1 w:0) - // Storage: Society Votes (r:1 w:0) - // Storage: Society Members (r:1 w:1) - // Storage: Society Parameters (r:1 w:0) + // Proof Size summary in bytes: + // Measured: `387` + // Estimated: `1872` + // Minimum execution time: 10_656_000 picoseconds. + Weight::from_parts(11_063_000, 1872) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:1 w:0) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Skeptic` (r:1 w:0) + /// Proof: `Society::Skeptic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Votes` (r:1 w:0) + /// Proof: `Society::Votes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:1 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:1 w:0) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn punish_skeptic() -> Weight { - Weight::zero() - } - // Storage: Society Candidates (r:1 w:1) - // Storage: Society RoundCount (r:1 w:0) - // Storage: Society Parameters (r:1 w:0) - // Storage: Society MemberCount (r:1 w:1) - // Storage: Society NextHead (r:1 w:1) - // Storage: System Account (r:1 w:1) - // Storage: Society MemberByIndex (r:0 w:1) - // Storage: Society Members (r:0 w:1) + // Proof Size summary in bytes: + // Measured: `636` + // Estimated: `4101` + // Minimum execution time: 22_837_000 picoseconds. 
+ Weight::from_parts(23_738_000, 4101) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:1 w:0) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:1 w:0) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberCount` (r:1 w:1) + /// Proof: `Society::MemberCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::NextHead` (r:1 w:1) + /// Proof: `Society::NextHead` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Society::MemberByIndex` (r:0 w:1) + /// Proof: `Society::MemberByIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:0 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) fn claim_membership() -> Weight { - Weight::zero() - } - // Storage: Society Founder (r:1 w:0) - // Storage: Society Candidates (r:1 w:1) - // Storage: Society RoundCount (r:1 w:0) - // Storage: Society Parameters (r:1 w:0) - // Storage: Society MemberCount (r:1 w:1) - // Storage: Society NextHead (r:1 w:1) - // Storage: System Account (r:1 w:1) - // Storage: Society MemberByIndex (r:0 w:1) - // Storage: Society Members (r:0 w:1) + // Proof Size summary in bytes: + // Measured: `632` + // Estimated: `4097` + // Minimum execution time: 35_142_000 picoseconds. 
+ Weight::from_parts(36_811_000, 4097) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) + } + /// Storage: `Society::Founder` (r:1 w:0) + /// Proof: `Society::Founder` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:1 w:0) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:1 w:0) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberCount` (r:1 w:1) + /// Proof: `Society::MemberCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::NextHead` (r:1 w:1) + /// Proof: `Society::NextHead` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Society::MemberByIndex` (r:0 w:1) + /// Proof: `Society::MemberByIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:0 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) fn bestow_membership() -> Weight { - Weight::zero() - } - // Storage: Society Founder (r:1 w:0) - // Storage: Society Candidates (r:1 w:1) - // Storage: Society RoundCount (r:1 w:0) + // Proof Size summary in bytes: + // Measured: `650` + // Estimated: `4115` + // Minimum execution time: 37_133_000 picoseconds. + Weight::from_parts(38_366_000, 4115) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) + } + /// Storage: `Society::Founder` (r:1 w:0) + /// Proof: `Society::Founder` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:1 w:0) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn kick_candidate() -> Weight { - Weight::zero() - } - // Storage: Society Candidates (r:1 w:1) - // Storage: Society RoundCount (r:1 w:0) + // Proof Size summary in bytes: + // Measured: `776` + // Estimated: `6196` + // Minimum execution time: 37_033_000 picoseconds. 
+ Weight::from_parts(38_293_000, 6196) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:1 w:0) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn resign_candidacy() -> Weight { - Weight::zero() - } - // Storage: Society Candidates (r:1 w:1) - // Storage: Society RoundCount (r:1 w:0) + // Proof Size summary in bytes: + // Measured: `746` + // Estimated: `6196` + // Minimum execution time: 35_203_000 picoseconds. + Weight::from_parts(36_252_000, 6196) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:1 w:0) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_candidate() -> Weight { - Weight::zero() - } - // Storage: Society Candidates (r:1 w:0) - // Storage: Society VoteClearCursor (r:1 w:0) - // Storage: Society Votes (r:0 w:2) + // Proof Size summary in bytes: + // Measured: `758` + // Estimated: `6196` + // Minimum execution time: 35_518_000 picoseconds. + Weight::from_parts(36_508_000, 6196) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + /// Storage: `Society::Candidates` (r:1 w:0) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::VoteClearCursor` (r:1 w:0) + /// Proof: `Society::VoteClearCursor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Votes` (r:2 w:2) + /// Proof: `Society::Votes` (`max_values`: None, `max_size`: None, mode: `Measured`) fn cleanup_candidacy() -> Weight { - Weight::zero() - } - // Storage: Society ChallengeRoundCount (r:1 w:0) - // Storage: Society DefenderVotes (r:0 w:1) + // Proof Size summary in bytes: + // Measured: `552` + // Estimated: `6492` + // Minimum execution time: 17_001_000 picoseconds. + Weight::from_parts(17_845_000, 6492) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Society::ChallengeRoundCount` (r:1 w:0) + /// Proof: `Society::ChallengeRoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::DefenderVotes` (r:1 w:1) + /// Proof: `Society::DefenderVotes` (`max_values`: None, `max_size`: None, mode: `Measured`) fn cleanup_challenge() -> Weight { - Weight::zero() + // Proof Size summary in bytes: + // Measured: `510` + // Estimated: `3975` + // Minimum execution time: 11_583_000 picoseconds. + Weight::from_parts(12_134_000, 3975) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - // Storage: Society Bids (r:1 w:1) - // Storage: Society Candidates (r:1 w:0) - // Storage: Society Members (r:1 w:0) - // Storage: Society SuspendedMembers (r:1 w:0) - // Storage: Society Parameters (r:1 w:0) + /// Storage: `Society::Bids` (r:1 w:1) + /// Proof: `Society::Bids` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Candidates` (r:1 w:0) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:1 w:0) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::SuspendedMembers` (r:1 w:0) + /// Proof: `Society::SuspendedMembers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:1 w:0) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn bid() -> Weight { - Weight::zero() - } - // Storage: Society Bids (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `444` + // Estimated: `3909` + // Minimum execution time: 29_905_000 picoseconds. + Weight::from_parts(31_031_000, 3909) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Society::Bids` (r:1 w:1) + /// Proof: `Society::Bids` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn unbid() -> Weight { - Weight::zero() - } - // Storage: Society Bids (r:1 w:1) - // Storage: Society Candidates (r:1 w:0) - // Storage: Society Members (r:2 w:1) - // Storage: Society SuspendedMembers (r:1 w:0) + // Proof Size summary in bytes: + // Measured: `461` + // Estimated: `1946` + // Minimum execution time: 23_038_000 picoseconds. + Weight::from_parts(23_904_000, 1946) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Society::Bids` (r:1 w:1) + /// Proof: `Society::Bids` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Candidates` (r:1 w:0) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:2 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::SuspendedMembers` (r:1 w:0) + /// Proof: `Society::SuspendedMembers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn vouch() -> Weight { - Weight::zero() - } - // Storage: Society Bids (r:1 w:1) - // Storage: Society Members (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `481` + // Estimated: `6421` + // Minimum execution time: 21_197_000 picoseconds. + Weight::from_parts(22_043_000, 6421) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Bids` (r:1 w:1) + /// Proof: `Society::Bids` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:1 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) fn unvouch() -> Weight { - Weight::zero() - } - // Storage: Society Candidates (r:1 w:1) - // Storage: Society Members (r:1 w:0) - // Storage: Society Votes (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `535` + // Estimated: `4000` + // Minimum execution time: 14_402_000 picoseconds. 
+ Weight::from_parts(15_171_000, 4000) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:1 w:0) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Votes` (r:1 w:1) + /// Proof: `Society::Votes` (`max_values`: None, `max_size`: None, mode: `Measured`) fn vote() -> Weight { - Weight::zero() - } - // Storage: Society Defending (r:1 w:1) - // Storage: Society Members (r:1 w:0) - // Storage: Society ChallengeRoundCount (r:1 w:0) - // Storage: Society DefenderVotes (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `569` + // Estimated: `4034` + // Minimum execution time: 21_930_000 picoseconds. + Weight::from_parts(22_666_000, 4034) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Defending` (r:1 w:1) + /// Proof: `Society::Defending` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:1 w:0) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::ChallengeRoundCount` (r:1 w:0) + /// Proof: `Society::ChallengeRoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::DefenderVotes` (r:1 w:1) + /// Proof: `Society::DefenderVotes` (`max_values`: None, `max_size`: None, mode: `Measured`) fn defender_vote() -> Weight { - Weight::zero() - } - // Storage: Society Members (r:1 w:0) - // Storage: Society Payouts (r:1 w:1) - // Storage: System Account (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `561` + // Estimated: `4026` + // Minimum execution time: 18_821_000 picoseconds. + Weight::from_parts(19_633_000, 4026) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Members` (r:1 w:0) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Payouts` (r:1 w:1) + /// Proof: `Society::Payouts` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn payout() -> Weight { - Weight::zero() - } - // Storage: Society Members (r:1 w:1) - // Storage: Society Payouts (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `650` + // Estimated: `4115` + // Minimum execution time: 47_262_000 picoseconds. 
+ Weight::from_parts(48_313_000, 4115) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Members` (r:1 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Payouts` (r:1 w:1) + /// Proof: `Society::Payouts` (`max_values`: None, `max_size`: None, mode: `Measured`) fn waive_repay() -> Weight { - Weight::zero() - } - // Storage: Society Head (r:1 w:1) - // Storage: Society MemberCount (r:1 w:1) - // Storage: Society MemberByIndex (r:0 w:1) - // Storage: Society Founder (r:0 w:1) - // Storage: Society Rules (r:0 w:1) - // Storage: Society Members (r:0 w:1) - // Storage: Society Parameters (r:0 w:1) + // Proof Size summary in bytes: + // Measured: `547` + // Estimated: `4012` + // Minimum execution time: 18_189_000 picoseconds. + Weight::from_parts(19_038_000, 4012) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Head` (r:1 w:1) + /// Proof: `Society::Head` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberCount` (r:1 w:1) + /// Proof: `Society::MemberCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberByIndex` (r:0 w:1) + /// Proof: `Society::MemberByIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Founder` (r:0 w:1) + /// Proof: `Society::Founder` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Rules` (r:0 w:1) + /// Proof: `Society::Rules` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:0 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:0 w:1) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn found_society() -> Weight { - Weight::zero() - } - // Storage: Society Founder (r:1 w:1) - // Storage: Society MemberCount (r:1 w:1) - // Storage: Society Head (r:0 w:1) - // Storage: Society Defending (r:0 w:1) - // Storage: Society ChallengeRoundCount (r:0 w:1) - // Storage: Society MemberByIndex (r:0 w:5) - // Storage: Society Skeptic (r:0 w:1) - // Storage: Society Candidates (r:0 w:4) - // Storage: Society Pot (r:0 w:1) - // Storage: Society Rules (r:0 w:1) - // Storage: Society Votes (r:0 w:4) - // Storage: Society Members (r:0 w:5) - // Storage: Society RoundCount (r:0 w:1) - // Storage: Society Bids (r:0 w:1) - // Storage: Society Parameters (r:0 w:1) - // Storage: Society NextHead (r:0 w:1) + // Proof Size summary in bytes: + // Measured: `180` + // Estimated: `1665` + // Minimum execution time: 14_815_000 picoseconds. 
+ Weight::from_parts(15_426_000, 1665) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(7_u64)) + } + /// Storage: `Society::Founder` (r:1 w:1) + /// Proof: `Society::Founder` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberCount` (r:1 w:1) + /// Proof: `Society::MemberCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:5 w:5) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberByIndex` (r:5 w:5) + /// Proof: `Society::MemberByIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Votes` (r:4 w:4) + /// Proof: `Society::Votes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Candidates` (r:4 w:4) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Head` (r:0 w:1) + /// Proof: `Society::Head` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Defending` (r:0 w:1) + /// Proof: `Society::Defending` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::ChallengeRoundCount` (r:0 w:1) + /// Proof: `Society::ChallengeRoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Skeptic` (r:0 w:1) + /// Proof: `Society::Skeptic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Pot` (r:0 w:1) + /// Proof: `Society::Pot` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Rules` (r:0 w:1) + /// Proof: `Society::Rules` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:0 w:1) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Bids` (r:0 w:1) + /// Proof: `Society::Bids` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:0 w:1) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::NextHead` (r:0 w:1) + /// Proof: `Society::NextHead` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn dissolve() -> Weight { - Weight::zero() - } - // Storage: Society Founder (r:1 w:0) - // Storage: Society SuspendedMembers (r:1 w:1) - // Storage: Society Payouts (r:1 w:0) - // Storage: Society Pot (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `1654` + // Estimated: `15019` + // Minimum execution time: 57_787_000 picoseconds. 
+ Weight::from_parts(59_489_000, 15019) + .saturating_add(RocksDbWeight::get().reads(20_u64)) + .saturating_add(RocksDbWeight::get().writes(30_u64)) + } + /// Storage: `Society::Founder` (r:1 w:0) + /// Proof: `Society::Founder` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::SuspendedMembers` (r:1 w:1) + /// Proof: `Society::SuspendedMembers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Payouts` (r:1 w:0) + /// Proof: `Society::Payouts` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Pot` (r:1 w:1) + /// Proof: `Society::Pot` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn judge_suspended_member() -> Weight { - Weight::zero() - } - // Storage: Society Founder (r:1 w:0) - // Storage: Society MemberCount (r:1 w:0) - // Storage: Society Parameters (r:0 w:1) + // Proof Size summary in bytes: + // Measured: `505` + // Estimated: `3970` + // Minimum execution time: 19_262_000 picoseconds. + Weight::from_parts(19_752_000, 3970) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Founder` (r:1 w:0) + /// Proof: `Society::Founder` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberCount` (r:1 w:0) + /// Proof: `Society::MemberCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:0 w:1) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_parameters() -> Weight { - Weight::zero() - } - // Storage: Society Candidates (r:1 w:1) - // Storage: Society RoundCount (r:1 w:0) - // Storage: Society Skeptic (r:1 w:0) - // Storage: Society Votes (r:1 w:0) - // Storage: Society Members (r:1 w:1) - // Storage: Society Parameters (r:1 w:0) + // Proof Size summary in bytes: + // Measured: `387` + // Estimated: `1872` + // Minimum execution time: 10_656_000 picoseconds. + Weight::from_parts(11_063_000, 1872) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:1 w:0) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Skeptic` (r:1 w:0) + /// Proof: `Society::Skeptic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Votes` (r:1 w:0) + /// Proof: `Society::Votes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:1 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:1 w:0) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn punish_skeptic() -> Weight { - Weight::zero() - } - // Storage: Society Candidates (r:1 w:1) - // Storage: Society RoundCount (r:1 w:0) - // Storage: Society Parameters (r:1 w:0) - // Storage: Society MemberCount (r:1 w:1) - // Storage: Society NextHead (r:1 w:1) - // Storage: System Account (r:1 w:1) - // Storage: Society MemberByIndex (r:0 w:1) - // Storage: Society Members (r:0 w:1) + // Proof Size summary in bytes: + // Measured: `636` + // Estimated: `4101` + // Minimum execution time: 22_837_000 picoseconds. 
+ Weight::from_parts(23_738_000, 4101) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:1 w:0) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:1 w:0) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberCount` (r:1 w:1) + /// Proof: `Society::MemberCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::NextHead` (r:1 w:1) + /// Proof: `Society::NextHead` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Society::MemberByIndex` (r:0 w:1) + /// Proof: `Society::MemberByIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:0 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) fn claim_membership() -> Weight { - Weight::zero() - } - // Storage: Society Founder (r:1 w:0) - // Storage: Society Candidates (r:1 w:1) - // Storage: Society RoundCount (r:1 w:0) - // Storage: Society Parameters (r:1 w:0) - // Storage: Society MemberCount (r:1 w:1) - // Storage: Society NextHead (r:1 w:1) - // Storage: System Account (r:1 w:1) - // Storage: Society MemberByIndex (r:0 w:1) - // Storage: Society Members (r:0 w:1) + // Proof Size summary in bytes: + // Measured: `632` + // Estimated: `4097` + // Minimum execution time: 35_142_000 picoseconds. 
+ Weight::from_parts(36_811_000, 4097) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) + } + /// Storage: `Society::Founder` (r:1 w:0) + /// Proof: `Society::Founder` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:1 w:0) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:1 w:0) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberCount` (r:1 w:1) + /// Proof: `Society::MemberCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::NextHead` (r:1 w:1) + /// Proof: `Society::NextHead` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Society::MemberByIndex` (r:0 w:1) + /// Proof: `Society::MemberByIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:0 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) fn bestow_membership() -> Weight { - Weight::zero() - } - // Storage: Society Founder (r:1 w:0) - // Storage: Society Candidates (r:1 w:1) - // Storage: Society RoundCount (r:1 w:0) + // Proof Size summary in bytes: + // Measured: `650` + // Estimated: `4115` + // Minimum execution time: 37_133_000 picoseconds. + Weight::from_parts(38_366_000, 4115) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) + } + /// Storage: `Society::Founder` (r:1 w:0) + /// Proof: `Society::Founder` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:1 w:0) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn kick_candidate() -> Weight { - Weight::zero() - } - // Storage: Society Candidates (r:1 w:1) - // Storage: Society RoundCount (r:1 w:0) + // Proof Size summary in bytes: + // Measured: `776` + // Estimated: `6196` + // Minimum execution time: 37_033_000 picoseconds. 
+ Weight::from_parts(38_293_000, 6196) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:1 w:0) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn resign_candidacy() -> Weight { - Weight::zero() - } - // Storage: Society Candidates (r:1 w:1) - // Storage: Society RoundCount (r:1 w:0) + // Proof Size summary in bytes: + // Measured: `746` + // Estimated: `6196` + // Minimum execution time: 35_203_000 picoseconds. + Weight::from_parts(36_252_000, 6196) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:1 w:0) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_candidate() -> Weight { - Weight::zero() - } - // Storage: Society Candidates (r:1 w:0) - // Storage: Society VoteClearCursor (r:1 w:0) - // Storage: Society Votes (r:0 w:2) + // Proof Size summary in bytes: + // Measured: `758` + // Estimated: `6196` + // Minimum execution time: 35_518_000 picoseconds. + Weight::from_parts(36_508_000, 6196) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: `Society::Candidates` (r:1 w:0) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::VoteClearCursor` (r:1 w:0) + /// Proof: `Society::VoteClearCursor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Votes` (r:2 w:2) + /// Proof: `Society::Votes` (`max_values`: None, `max_size`: None, mode: `Measured`) fn cleanup_candidacy() -> Weight { - Weight::zero() - } - // Storage: Society ChallengeRoundCount (r:1 w:0) - // Storage: Society DefenderVotes (r:0 w:1) + // Proof Size summary in bytes: + // Measured: `552` + // Estimated: `6492` + // Minimum execution time: 17_001_000 picoseconds. + Weight::from_parts(17_845_000, 6492) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Society::ChallengeRoundCount` (r:1 w:0) + /// Proof: `Society::ChallengeRoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::DefenderVotes` (r:1 w:1) + /// Proof: `Society::DefenderVotes` (`max_values`: None, `max_size`: None, mode: `Measured`) fn cleanup_challenge() -> Weight { - Weight::zero() + // Proof Size summary in bytes: + // Measured: `510` + // Estimated: `3975` + // Minimum execution time: 11_583_000 picoseconds. 
+ Weight::from_parts(12_134_000, 3975) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs index a1715ba4900102595ac6ec926a0aeeb2a2be1650..549e177491d32ffa18e390a2f83027d3152f0066 100644 --- a/substrate/frame/src/lib.rs +++ b/substrate/frame/src/lib.rs @@ -163,6 +163,12 @@ pub mod runtime { ConstU32, ConstU64, ConstU8, }; + /// Primary types used to parameterize `EnsureOrigin` and `EnsureRootWithArg`. + pub use frame_system::{ + EnsureNever, EnsureNone, EnsureRoot, EnsureRootWithSuccess, EnsureSigned, + EnsureSignedBy, + }; + /// Types to define your runtime version. pub use sp_version::{create_runtime_str, runtime_version, RuntimeVersion}; @@ -191,7 +197,7 @@ pub mod runtime { // Types often used in the runtime APIs. pub use sp_core::OpaqueMetadata; pub use sp_inherents::{CheckInherentsResult, InherentData}; - pub use sp_runtime::ApplyExtrinsicResult; + pub use sp_runtime::{ApplyExtrinsicResult, ExtrinsicInclusionMode}; pub use frame_system_rpc_runtime_api::*; pub use sp_api::{self, *}; @@ -243,8 +249,8 @@ pub mod runtime { /// The block type, which should be fed into [`frame_system::Config`]. /// - /// Should be parameterized with `T: frame_system::Config` and a tuple of `SignedExtension`. - /// When in doubt, use [`SystemSignedExtensionsOf`]. + /// Should be parameterized with `T: frame_system::Config` and a tuple of + /// `TransactionExtension`. When in doubt, use [`SystemTransactionExtensionsOf`]. // Note that this cannot be dependent on `T` for block-number because it would lead to a // circular dependency (self-referential generics). pub type BlockOf = generic::Block>; @@ -258,7 +264,7 @@ pub mod runtime { /// Default set of signed extensions exposed from the `frame_system`. /// /// crucially, this does NOT contain any tx-payment extension. 
- pub type SystemSignedExtensionsOf<T> = ( + pub type SystemTransactionExtensionsOf<T> = ( frame_system::CheckNonZeroSender<T>, frame_system::CheckSpecVersion<T>, frame_system::CheckTxVersion<T>, diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs index 98984be9920d782bcfbe4bd1dec733294dc604e7..3e3b1113a8198ba4d1718cf22f36c3568057c0ff 100644 --- a/substrate/frame/staking/src/migrations.rs +++ b/substrate/frame/staking/src/migrations.rs @@ -67,11 +67,11 @@ pub struct MigrateToV14<T>(core::marker::PhantomData<T>); impl<T: Config> OnRuntimeUpgrade for MigrateToV14<T> { fn on_runtime_upgrade() -> Weight { - let current = Pallet::<T>::current_storage_version(); + let in_code = Pallet::<T>::in_code_storage_version(); let on_chain = Pallet::<T>::on_chain_storage_version(); - if current == 14 && on_chain == 13 { - current.put::<Pallet<T>>(); + if in_code == 14 && on_chain == 13 { + in_code.put::<Pallet<T>>(); log!(info, "v14 applied successfully."); T::DbWeight::get().reads_writes(1, 1) @@ -108,12 +108,12 @@ pub mod v13 { } fn on_runtime_upgrade() -> Weight { - let current = Pallet::<T>::current_storage_version(); + let in_code = Pallet::<T>::in_code_storage_version(); let onchain = StorageVersion::<T>::get(); - if current == 13 && onchain == ObsoleteReleases::V12_0_0 { + if in_code == 13 && onchain == ObsoleteReleases::V12_0_0 { StorageVersion::<T>::kill(); - current.put::<Pallet<T>>(); + in_code.put::<Pallet<T>>(); log!(info, "v13 applied successfully"); T::DbWeight::get().reads_writes(1, 2) diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index e0213efd507fd46d6d7d8800b6d02c89229a13af..992a7fe2c9c6218c7d132571a2f0cab0ee6618a4 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -66,7 +66,7 @@ pub mod pallet { use super::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(14); #[pallet::pallet] diff --git a/substrate/frame/staking/src/weights.rs b/substrate/frame/staking/src/weights.rs index 6f729e08ba5c839997cd81239f0e4bd39728df14..8288591a787bb3f1e8df0bdb19c383f5ecfca396 100644 --- a/substrate/frame/staking/src/weights.rs +++ b/substrate/frame/staking/src/weights.rs @@ -17,26 +17,28 @@ //! Autogenerated weights for `pallet_staking` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-q7z7ruxr-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //!
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_staking +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_staking -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/staking/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -99,8 +101,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `927` // Estimated: `4764` - // Minimum execution time: 42_042_000 picoseconds. - Weight::from_parts(43_292_000, 4764) + // Minimum execution time: 41_318_000 picoseconds. + Weight::from_parts(43_268_000, 4764) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -120,8 +122,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1990` // Estimated: `8877` - // Minimum execution time: 85_050_000 picoseconds. - Weight::from_parts(87_567_000, 8877) + // Minimum execution time: 85_666_000 picoseconds. + Weight::from_parts(88_749_000, 8877) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -147,8 +149,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2195` // Estimated: `8877` - // Minimum execution time: 89_076_000 picoseconds. - Weight::from_parts(92_715_000, 8877) + // Minimum execution time: 90_282_000 picoseconds. + Weight::from_parts(92_332_000, 8877) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -162,16 +164,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1115` + // Measured: `1297` // Estimated: `4764` - // Minimum execution time: 42_067_000 picoseconds. - Weight::from_parts(43_239_807, 4764) - // Standard Error: 831 - .saturating_add(Weight::from_parts(46_257, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(5_u64)) + // Minimum execution time: 44_626_000 picoseconds. + Weight::from_parts(47_254_657, 4764) + // Standard Error: 1_179 + .saturating_add(Weight::from_parts(57_657, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Staking::Ledger` (r:1 w:1) @@ -207,10 +211,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 86_490_000 picoseconds. 
- Weight::from_parts(95_358_751, 6248) - // Standard Error: 3_952 - .saturating_add(Weight::from_parts(1_294_907, 0).saturating_mul(s.into())) + // Minimum execution time: 86_769_000 picoseconds. + Weight::from_parts(95_212_867, 6248) + // Standard Error: 3_706 + .saturating_add(Weight::from_parts(1_320_752, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(11_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -242,8 +246,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1372` // Estimated: `4556` - // Minimum execution time: 50_326_000 picoseconds. - Weight::from_parts(52_253_000, 4556) + // Minimum execution time: 50_410_000 picoseconds. + Weight::from_parts(52_576_000, 4556) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -256,10 +260,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1280 + k * (569 ±0)` // Estimated: `4556 + k * (3033 ±0)` - // Minimum execution time: 29_305_000 picoseconds. - Weight::from_parts(32_199_604, 4556) - // Standard Error: 7_150 - .saturating_add(Weight::from_parts(6_437_124, 0).saturating_mul(k.into())) + // Minimum execution time: 29_017_000 picoseconds. + Weight::from_parts(33_019_206, 4556) + // Standard Error: 6_368 + .saturating_add(Weight::from_parts(6_158_681, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -292,10 +296,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1866 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 63_267_000 picoseconds. - Weight::from_parts(61_741_404, 6248) - // Standard Error: 12_955 - .saturating_add(Weight::from_parts(3_811_743, 0).saturating_mul(n.into())) + // Minimum execution time: 63_452_000 picoseconds. + Weight::from_parts(60_924_066, 6248) + // Standard Error: 14_416 + .saturating_add(Weight::from_parts(3_921_589, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) @@ -319,8 +323,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1650` // Estimated: `6248` - // Minimum execution time: 52_862_000 picoseconds. - Weight::from_parts(54_108_000, 6248) + // Minimum execution time: 54_253_000 picoseconds. + Weight::from_parts(55_286_000, 6248) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -334,8 +338,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `902` // Estimated: `4556` - // Minimum execution time: 16_350_000 picoseconds. - Weight::from_parts(16_802_000, 4556) + // Minimum execution time: 16_812_000 picoseconds. + Weight::from_parts(17_380_000, 4556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -349,8 +353,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `969` // Estimated: `4556` - // Minimum execution time: 19_981_000 picoseconds. - Weight::from_parts(20_539_000, 4556) + // Minimum execution time: 20_590_000 picoseconds. 
+ Weight::from_parts(21_096_000, 4556) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -362,8 +366,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `902` // Estimated: `4556` - // Minimum execution time: 19_304_000 picoseconds. - Weight::from_parts(20_000_000, 4556) + // Minimum execution time: 19_923_000 picoseconds. + Weight::from_parts(20_880_000, 4556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -373,8 +377,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_568_000 picoseconds. - Weight::from_parts(2_708_000, 0) + // Minimum execution time: 2_599_000 picoseconds. + Weight::from_parts(2_751_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -383,8 +387,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_950_000 picoseconds. - Weight::from_parts(8_348_000, 0) + // Minimum execution time: 6_941_000 picoseconds. + Weight::from_parts(7_288_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -393,8 +397,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_967_000 picoseconds. - Weight::from_parts(8_222_000, 0) + // Minimum execution time: 6_757_000 picoseconds. + Weight::from_parts(7_200_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -403,8 +407,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_006_000 picoseconds. - Weight::from_parts(8_440_000, 0) + // Minimum execution time: 7_028_000 picoseconds. + Weight::from_parts(7_366_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::Invulnerables` (r:0 w:1) @@ -414,10 +418,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_524_000 picoseconds. - Weight::from_parts(3_123_608, 0) - // Standard Error: 59 - .saturating_add(Weight::from_parts(11_596, 0).saturating_mul(v.into())) + // Minimum execution time: 2_741_000 picoseconds. + Weight::from_parts(3_212_598, 0) + // Standard Error: 68 + .saturating_add(Weight::from_parts(11_695, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::Ledger` (r:5900 w:11800) @@ -431,10 +435,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1356 + i * (151 ±0)` // Estimated: `990 + i * (3566 ±0)` - // Minimum execution time: 2_092_000 picoseconds. - Weight::from_parts(2_258_000, 990) - // Standard Error: 32_695 - .saturating_add(Weight::from_parts(16_669_219, 0).saturating_mul(i.into())) + // Minimum execution time: 2_132_000 picoseconds. 
+ Weight::from_parts(2_289_000, 990) + // Standard Error: 34_227 + .saturating_add(Weight::from_parts(17_006_583, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(i.into()))) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(i.into()))) .saturating_add(Weight::from_parts(0, 3566).saturating_mul(i.into())) @@ -472,10 +476,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 84_275_000 picoseconds. - Weight::from_parts(92_512_416, 6248) - // Standard Error: 3_633 - .saturating_add(Weight::from_parts(1_315_923, 0).saturating_mul(s.into())) + // Minimum execution time: 85_308_000 picoseconds. + Weight::from_parts(93_689_468, 6248) + // Standard Error: 5_425 + .saturating_add(Weight::from_parts(1_307_604, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(12_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -488,10 +492,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `66672` // Estimated: `70137` - // Minimum execution time: 101_707_000 picoseconds. - Weight::from_parts(912_819_462, 70137) - // Standard Error: 57_547 - .saturating_add(Weight::from_parts(4_856_799, 0).saturating_mul(s.into())) + // Minimum execution time: 103_242_000 picoseconds. + Weight::from_parts(1_162_296_080, 70137) + // Standard Error: 76_741 + .saturating_add(Weight::from_parts(6_487_522, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -528,10 +532,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `33297 + n * (377 ±0)` // Estimated: `30944 + n * (3774 ±0)` - // Minimum execution time: 138_657_000 picoseconds. - Weight::from_parts(167_173_445, 30944) - // Standard Error: 25_130 - .saturating_add(Weight::from_parts(44_566_012, 0).saturating_mul(n.into())) + // Minimum execution time: 140_740_000 picoseconds. + Weight::from_parts(182_886_963, 30944) + // Standard Error: 39_852 + .saturating_add(Weight::from_parts(43_140_752, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) @@ -555,10 +559,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1991 + l * (7 ±0)` // Estimated: `8877` - // Minimum execution time: 80_061_000 picoseconds. - Weight::from_parts(82_836_434, 8877) - // Standard Error: 4_348 - .saturating_add(Weight::from_parts(75_744, 0).saturating_mul(l.into())) + // Minimum execution time: 80_372_000 picoseconds. + Weight::from_parts(83_335_027, 8877) + // Standard Error: 4_780 + .saturating_add(Weight::from_parts(72_180, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -593,10 +597,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 92_560_000 picoseconds. - Weight::from_parts(97_684_741, 6248) - // Standard Error: 3_361 - .saturating_add(Weight::from_parts(1_292_732, 0).saturating_mul(s.into())) + // Minimum execution time: 93_920_000 picoseconds. 
+ Weight::from_parts(98_022_911, 6248) + // Standard Error: 4_096 + .saturating_add(Weight::from_parts(1_287_745, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(11_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -642,12 +646,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + n * (720 ±0) + v * (3598 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 564_963_000 picoseconds. - Weight::from_parts(569_206_000, 512390) - // Standard Error: 2_033_235 - .saturating_add(Weight::from_parts(68_025_841, 0).saturating_mul(v.into())) - // Standard Error: 202_600 - .saturating_add(Weight::from_parts(17_916_770, 0).saturating_mul(n.into())) + // Minimum execution time: 556_012_000 picoseconds. + Weight::from_parts(560_339_000, 512390) + // Standard Error: 2_115_076 + .saturating_add(Weight::from_parts(69_456_497, 0).saturating_mul(v.into())) + // Standard Error: 210_755 + .saturating_add(Weight::from_parts(17_974_873, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(206_u64)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -678,12 +682,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3175 + n * (911 ±0) + v * (395 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 32_196_540_000 picoseconds. - Weight::from_parts(32_341_871_000, 512390) - // Standard Error: 354_657 - .saturating_add(Weight::from_parts(5_143_440, 0).saturating_mul(v.into())) - // Standard Error: 354_657 - .saturating_add(Weight::from_parts(3_328_189, 0).saturating_mul(n.into())) + // Minimum execution time: 32_792_819_000 picoseconds. + Weight::from_parts(32_986_606_000, 512390) + // Standard Error: 360_905 + .saturating_add(Weight::from_parts(5_260_151, 0).saturating_mul(v.into())) + // Standard Error: 360_905 + .saturating_add(Weight::from_parts(3_599_219, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(201_u64)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -700,10 +704,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `979 + v * (50 ±0)` // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_381_903_000 picoseconds. - Weight::from_parts(32_693_059, 3510) - // Standard Error: 10_000 - .saturating_add(Weight::from_parts(4_736_173, 0).saturating_mul(v.into())) + // Minimum execution time: 2_434_755_000 picoseconds. 
+ Weight::from_parts(38_574_764, 3510) + // Standard Error: 14_467 + .saturating_add(Weight::from_parts(4_821_702, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) @@ -714,6 +718,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:0 w:1) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::ChillThreshold` (r:0 w:1) /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) @@ -724,9 +730,9 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_434_000 picoseconds. - Weight::from_parts(5_742_000, 0) - .saturating_add(T::DbWeight::get().writes(6_u64)) + // Minimum execution time: 5_765_000 picoseconds. + Weight::from_parts(6_140_000, 0) + .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: `Staking::MinCommission` (r:0 w:1) /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -734,6 +740,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:0 w:1) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::ChillThreshold` (r:0 w:1) /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) @@ -744,9 +752,9 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_588_000 picoseconds. - Weight::from_parts(4_854_000, 0) - .saturating_add(T::DbWeight::get().writes(6_u64)) + // Minimum execution time: 5_025_000 picoseconds. + Weight::from_parts(5_354_000, 0) + .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) @@ -774,8 +782,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1939` // Estimated: `6248` - // Minimum execution time: 68_780_000 picoseconds. - Weight::from_parts(71_479_000, 6248) + // Minimum execution time: 69_656_000 picoseconds. + Weight::from_parts(71_574_000, 6248) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -787,8 +795,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `691` // Estimated: `3510` - // Minimum execution time: 12_268_000 picoseconds. 
- Weight::from_parts(12_661_000, 3510) + // Minimum execution time: 12_523_000 picoseconds. + Weight::from_parts(13_315_000, 3510) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -798,8 +806,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_071_000 picoseconds. - Weight::from_parts(3_334_000, 0) + // Minimum execution time: 3_125_000 picoseconds. + Weight::from_parts(3_300_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -820,8 +828,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `927` // Estimated: `4764` - // Minimum execution time: 42_042_000 picoseconds. - Weight::from_parts(43_292_000, 4764) + // Minimum execution time: 41_318_000 picoseconds. + Weight::from_parts(43_268_000, 4764) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -841,8 +849,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1990` // Estimated: `8877` - // Minimum execution time: 85_050_000 picoseconds. - Weight::from_parts(87_567_000, 8877) + // Minimum execution time: 85_666_000 picoseconds. + Weight::from_parts(88_749_000, 8877) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -868,8 +876,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2195` // Estimated: `8877` - // Minimum execution time: 89_076_000 picoseconds. - Weight::from_parts(92_715_000, 8877) + // Minimum execution time: 90_282_000 picoseconds. + Weight::from_parts(92_332_000, 8877) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -883,16 +891,18 @@ impl WeightInfo for () { /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1115` + // Measured: `1297` // Estimated: `4764` - // Minimum execution time: 42_067_000 picoseconds. - Weight::from_parts(43_239_807, 4764) - // Standard Error: 831 - .saturating_add(Weight::from_parts(46_257, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(5_u64)) + // Minimum execution time: 44_626_000 picoseconds. + Weight::from_parts(47_254_657, 4764) + // Standard Error: 1_179 + .saturating_add(Weight::from_parts(57_657, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Staking::Ledger` (r:1 w:1) @@ -928,10 +938,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 86_490_000 picoseconds. - Weight::from_parts(95_358_751, 6248) - // Standard Error: 3_952 - .saturating_add(Weight::from_parts(1_294_907, 0).saturating_mul(s.into())) + // Minimum execution time: 86_769_000 picoseconds. 
+ Weight::from_parts(95_212_867, 6248) + // Standard Error: 3_706 + .saturating_add(Weight::from_parts(1_320_752, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(11_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -963,8 +973,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1372` // Estimated: `4556` - // Minimum execution time: 50_326_000 picoseconds. - Weight::from_parts(52_253_000, 4556) + // Minimum execution time: 50_410_000 picoseconds. + Weight::from_parts(52_576_000, 4556) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -977,10 +987,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1280 + k * (569 ±0)` // Estimated: `4556 + k * (3033 ±0)` - // Minimum execution time: 29_305_000 picoseconds. - Weight::from_parts(32_199_604, 4556) - // Standard Error: 7_150 - .saturating_add(Weight::from_parts(6_437_124, 0).saturating_mul(k.into())) + // Minimum execution time: 29_017_000 picoseconds. + Weight::from_parts(33_019_206, 4556) + // Standard Error: 6_368 + .saturating_add(Weight::from_parts(6_158_681, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -1013,10 +1023,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1866 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 63_267_000 picoseconds. - Weight::from_parts(61_741_404, 6248) - // Standard Error: 12_955 - .saturating_add(Weight::from_parts(3_811_743, 0).saturating_mul(n.into())) + // Minimum execution time: 63_452_000 picoseconds. + Weight::from_parts(60_924_066, 6248) + // Standard Error: 14_416 + .saturating_add(Weight::from_parts(3_921_589, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) @@ -1040,8 +1050,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1650` // Estimated: `6248` - // Minimum execution time: 52_862_000 picoseconds. - Weight::from_parts(54_108_000, 6248) + // Minimum execution time: 54_253_000 picoseconds. + Weight::from_parts(55_286_000, 6248) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -1055,8 +1065,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `902` // Estimated: `4556` - // Minimum execution time: 16_350_000 picoseconds. - Weight::from_parts(16_802_000, 4556) + // Minimum execution time: 16_812_000 picoseconds. + Weight::from_parts(17_380_000, 4556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1070,8 +1080,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `969` // Estimated: `4556` - // Minimum execution time: 19_981_000 picoseconds. - Weight::from_parts(20_539_000, 4556) + // Minimum execution time: 20_590_000 picoseconds. 
+ Weight::from_parts(21_096_000, 4556) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1083,8 +1093,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `902` // Estimated: `4556` - // Minimum execution time: 19_304_000 picoseconds. - Weight::from_parts(20_000_000, 4556) + // Minimum execution time: 19_923_000 picoseconds. + Weight::from_parts(20_880_000, 4556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1094,8 +1104,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_568_000 picoseconds. - Weight::from_parts(2_708_000, 0) + // Minimum execution time: 2_599_000 picoseconds. + Weight::from_parts(2_751_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -1104,8 +1114,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_950_000 picoseconds. - Weight::from_parts(8_348_000, 0) + // Minimum execution time: 6_941_000 picoseconds. + Weight::from_parts(7_288_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -1114,8 +1124,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_967_000 picoseconds. - Weight::from_parts(8_222_000, 0) + // Minimum execution time: 6_757_000 picoseconds. + Weight::from_parts(7_200_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -1124,8 +1134,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_006_000 picoseconds. - Weight::from_parts(8_440_000, 0) + // Minimum execution time: 7_028_000 picoseconds. + Weight::from_parts(7_366_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::Invulnerables` (r:0 w:1) @@ -1135,10 +1145,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_524_000 picoseconds. - Weight::from_parts(3_123_608, 0) - // Standard Error: 59 - .saturating_add(Weight::from_parts(11_596, 0).saturating_mul(v.into())) + // Minimum execution time: 2_741_000 picoseconds. + Weight::from_parts(3_212_598, 0) + // Standard Error: 68 + .saturating_add(Weight::from_parts(11_695, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::Ledger` (r:5900 w:11800) @@ -1152,10 +1162,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1356 + i * (151 ±0)` // Estimated: `990 + i * (3566 ±0)` - // Minimum execution time: 2_092_000 picoseconds. - Weight::from_parts(2_258_000, 990) - // Standard Error: 32_695 - .saturating_add(Weight::from_parts(16_669_219, 0).saturating_mul(i.into())) + // Minimum execution time: 2_132_000 picoseconds. 
+ Weight::from_parts(2_289_000, 990) + // Standard Error: 34_227 + .saturating_add(Weight::from_parts(17_006_583, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(i.into()))) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(i.into()))) .saturating_add(Weight::from_parts(0, 3566).saturating_mul(i.into())) @@ -1193,10 +1203,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 84_275_000 picoseconds. - Weight::from_parts(92_512_416, 6248) - // Standard Error: 3_633 - .saturating_add(Weight::from_parts(1_315_923, 0).saturating_mul(s.into())) + // Minimum execution time: 85_308_000 picoseconds. + Weight::from_parts(93_689_468, 6248) + // Standard Error: 5_425 + .saturating_add(Weight::from_parts(1_307_604, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(12_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -1209,10 +1219,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `66672` // Estimated: `70137` - // Minimum execution time: 101_707_000 picoseconds. - Weight::from_parts(912_819_462, 70137) - // Standard Error: 57_547 - .saturating_add(Weight::from_parts(4_856_799, 0).saturating_mul(s.into())) + // Minimum execution time: 103_242_000 picoseconds. + Weight::from_parts(1_162_296_080, 70137) + // Standard Error: 76_741 + .saturating_add(Weight::from_parts(6_487_522, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1249,10 +1259,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `33297 + n * (377 ±0)` // Estimated: `30944 + n * (3774 ±0)` - // Minimum execution time: 138_657_000 picoseconds. - Weight::from_parts(167_173_445, 30944) - // Standard Error: 25_130 - .saturating_add(Weight::from_parts(44_566_012, 0).saturating_mul(n.into())) + // Minimum execution time: 140_740_000 picoseconds. + Weight::from_parts(182_886_963, 30944) + // Standard Error: 39_852 + .saturating_add(Weight::from_parts(43_140_752, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().reads((6_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) @@ -1276,10 +1286,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1991 + l * (7 ±0)` // Estimated: `8877` - // Minimum execution time: 80_061_000 picoseconds. - Weight::from_parts(82_836_434, 8877) - // Standard Error: 4_348 - .saturating_add(Weight::from_parts(75_744, 0).saturating_mul(l.into())) + // Minimum execution time: 80_372_000 picoseconds. + Weight::from_parts(83_335_027, 8877) + // Standard Error: 4_780 + .saturating_add(Weight::from_parts(72_180, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -1314,10 +1324,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 92_560_000 picoseconds. - Weight::from_parts(97_684_741, 6248) - // Standard Error: 3_361 - .saturating_add(Weight::from_parts(1_292_732, 0).saturating_mul(s.into())) + // Minimum execution time: 93_920_000 picoseconds. 
+ Weight::from_parts(98_022_911, 6248) + // Standard Error: 4_096 + .saturating_add(Weight::from_parts(1_287_745, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(11_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -1363,12 +1373,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + n * (720 ±0) + v * (3598 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 564_963_000 picoseconds. - Weight::from_parts(569_206_000, 512390) - // Standard Error: 2_033_235 - .saturating_add(Weight::from_parts(68_025_841, 0).saturating_mul(v.into())) - // Standard Error: 202_600 - .saturating_add(Weight::from_parts(17_916_770, 0).saturating_mul(n.into())) + // Minimum execution time: 556_012_000 picoseconds. + Weight::from_parts(560_339_000, 512390) + // Standard Error: 2_115_076 + .saturating_add(Weight::from_parts(69_456_497, 0).saturating_mul(v.into())) + // Standard Error: 210_755 + .saturating_add(Weight::from_parts(17_974_873, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(206_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -1399,12 +1409,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3175 + n * (911 ±0) + v * (395 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 32_196_540_000 picoseconds. - Weight::from_parts(32_341_871_000, 512390) - // Standard Error: 354_657 - .saturating_add(Weight::from_parts(5_143_440, 0).saturating_mul(v.into())) - // Standard Error: 354_657 - .saturating_add(Weight::from_parts(3_328_189, 0).saturating_mul(n.into())) + // Minimum execution time: 32_792_819_000 picoseconds. + Weight::from_parts(32_986_606_000, 512390) + // Standard Error: 360_905 + .saturating_add(Weight::from_parts(5_260_151, 0).saturating_mul(v.into())) + // Standard Error: 360_905 + .saturating_add(Weight::from_parts(3_599_219, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(201_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -1421,10 +1431,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `979 + v * (50 ±0)` // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_381_903_000 picoseconds. - Weight::from_parts(32_693_059, 3510) - // Standard Error: 10_000 - .saturating_add(Weight::from_parts(4_736_173, 0).saturating_mul(v.into())) + // Minimum execution time: 2_434_755_000 picoseconds. 
+ Weight::from_parts(38_574_764, 3510) + // Standard Error: 14_467 + .saturating_add(Weight::from_parts(4_821_702, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) @@ -1435,6 +1445,8 @@ impl WeightInfo for () { /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:0 w:1) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::ChillThreshold` (r:0 w:1) /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) @@ -1445,9 +1457,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_434_000 picoseconds. - Weight::from_parts(5_742_000, 0) - .saturating_add(RocksDbWeight::get().writes(6_u64)) + // Minimum execution time: 5_765_000 picoseconds. + Weight::from_parts(6_140_000, 0) + .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: `Staking::MinCommission` (r:0 w:1) /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -1455,6 +1467,8 @@ impl WeightInfo for () { /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:0 w:1) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::ChillThreshold` (r:0 w:1) /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) @@ -1465,9 +1479,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_588_000 picoseconds. - Weight::from_parts(4_854_000, 0) - .saturating_add(RocksDbWeight::get().writes(6_u64)) + // Minimum execution time: 5_025_000 picoseconds. + Weight::from_parts(5_354_000, 0) + .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) @@ -1495,8 +1509,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1939` // Estimated: `6248` - // Minimum execution time: 68_780_000 picoseconds. - Weight::from_parts(71_479_000, 6248) + // Minimum execution time: 69_656_000 picoseconds. + Weight::from_parts(71_574_000, 6248) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -1508,8 +1522,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `691` // Estimated: `3510` - // Minimum execution time: 12_268_000 picoseconds. 
- Weight::from_parts(12_661_000, 3510) + // Minimum execution time: 12_523_000 picoseconds. + Weight::from_parts(13_315_000, 3510) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1519,8 +1533,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_071_000 picoseconds. - Weight::from_parts(3_334_000, 0) + // Minimum execution time: 3_125_000 picoseconds. + Weight::from_parts(3_300_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/frame/state-trie-migration/src/lib.rs b/substrate/frame/state-trie-migration/src/lib.rs index 6b3aa9934e070046ee694409b20faebf3934663f..b89159ef3ec44ede131eb3009a29f011082cb55d 100644 --- a/substrate/frame/state-trie-migration/src/lib.rs +++ b/substrate/frame/state-trie-migration/src/lib.rs @@ -1801,7 +1801,7 @@ mod remote_tests_local { use std::env::var as env_var; // we only use the hash type from this, so using the mock should be fine. - type Extrinsic = sp_runtime::testing::TestXt; + type Extrinsic = sp_runtime::generic::UncheckedExtrinsic; type Block = sp_runtime::testing::Block; #[tokio::test] diff --git a/substrate/frame/state-trie-migration/src/weights.rs b/substrate/frame/state-trie-migration/src/weights.rs index 8fa80b38957d9b38a40803a261d52cfe7836a990..23dad9e33800bd307b295cdf46ad286edd3af7b7 100644 --- a/substrate/frame/state-trie-migration/src/weights.rs +++ b/substrate/frame/state-trie-migration/src/weights.rs @@ -17,26 +17,28 @@ //! Autogenerated weights for `pallet_state_trie_migration` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-grjcggob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_state_trie_migration +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_state_trie_migration -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/state-trie-migration/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -64,15 +66,15 @@ impl WeightInfo for SubstrateWeight { /// Storage: `StateTrieMigration::SignedMigrationMaxLimits` (r:1 w:0) /// Proof: `StateTrieMigration::SignedMigrationMaxLimits` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(175), added: 2650, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `StateTrieMigration::MigrationProcess` (r:1 w:1) /// Proof: `StateTrieMigration::MigrationProcess` (`max_values`: Some(1), `max_size`: Some(1042), added: 1537, mode: `MaxEncodedLen`) fn continue_migrate() -> Weight { // Proof Size summary in bytes: // Measured: `108` - // Estimated: `3640` - // Minimum execution time: 18_520_000 picoseconds. - Weight::from_parts(19_171_000, 3640) + // Estimated: `3658` + // Minimum execution time: 17_661_000 picoseconds. + Weight::from_parts(18_077_000, 3658) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -82,53 +84,53 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1493` - // Minimum execution time: 3_786_000 picoseconds. - Weight::from_parts(4_038_000, 1493) + // Minimum execution time: 4_036_000 picoseconds. + Weight::from_parts(4_267_000, 1493) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(175), added: 2650, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn migrate_custom_top_success() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `3640` - // Minimum execution time: 11_144_000 picoseconds. - Weight::from_parts(11_556_000, 3640) + // Estimated: `3658` + // Minimum execution time: 11_348_000 picoseconds. + Weight::from_parts(11_899_000, 3658) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(175), added: 2650, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x666f6f` (r:1 w:1) /// Proof: UNKNOWN KEY `0x666f6f` (r:1 w:1) fn migrate_custom_top_fail() -> Weight { // Proof Size summary in bytes: // Measured: `113` - // Estimated: `3640` - // Minimum execution time: 59_288_000 picoseconds. 
- Weight::from_parts(60_276_000, 3640) + // Estimated: `3658` + // Minimum execution time: 59_819_000 picoseconds. + Weight::from_parts(61_066_000, 3658) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(175), added: 2650, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn migrate_custom_child_success() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `3640` - // Minimum execution time: 11_258_000 picoseconds. - Weight::from_parts(11_626_000, 3640) + // Estimated: `3658` + // Minimum execution time: 11_424_000 picoseconds. + Weight::from_parts(11_765_000, 3658) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(175), added: 2650, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x666f6f` (r:1 w:1) /// Proof: UNKNOWN KEY `0x666f6f` (r:1 w:1) fn migrate_custom_child_fail() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `3640` - // Minimum execution time: 61_575_000 picoseconds. - Weight::from_parts(63_454_000, 3640) + // Estimated: `3658` + // Minimum execution time: 61_086_000 picoseconds. + Weight::from_parts(62_546_000, 3658) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -139,10 +141,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `197 + v * (1 ±0)` // Estimated: `3662 + v * (1 ±0)` - // Minimum execution time: 5_259_000 picoseconds. - Weight::from_parts(5_433_000, 3662) + // Minimum execution time: 5_375_000 picoseconds. + Weight::from_parts(5_552_000, 3662) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_159, 0).saturating_mul(v.into())) + .saturating_add(Weight::from_parts(1_146, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(v.into())) @@ -154,15 +156,15 @@ impl WeightInfo for () { /// Storage: `StateTrieMigration::SignedMigrationMaxLimits` (r:1 w:0) /// Proof: `StateTrieMigration::SignedMigrationMaxLimits` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(175), added: 2650, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `StateTrieMigration::MigrationProcess` (r:1 w:1) /// Proof: `StateTrieMigration::MigrationProcess` (`max_values`: Some(1), `max_size`: Some(1042), added: 1537, mode: `MaxEncodedLen`) fn continue_migrate() -> Weight { // Proof Size summary in bytes: // Measured: `108` - // Estimated: `3640` - // Minimum execution time: 18_520_000 picoseconds. - Weight::from_parts(19_171_000, 3640) + // Estimated: `3658` + // Minimum execution time: 17_661_000 picoseconds. 
+ Weight::from_parts(18_077_000, 3658) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -172,53 +174,53 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1493` - // Minimum execution time: 3_786_000 picoseconds. - Weight::from_parts(4_038_000, 1493) + // Minimum execution time: 4_036_000 picoseconds. + Weight::from_parts(4_267_000, 1493) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(175), added: 2650, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn migrate_custom_top_success() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `3640` - // Minimum execution time: 11_144_000 picoseconds. - Weight::from_parts(11_556_000, 3640) + // Estimated: `3658` + // Minimum execution time: 11_348_000 picoseconds. + Weight::from_parts(11_899_000, 3658) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(175), added: 2650, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x666f6f` (r:1 w:1) /// Proof: UNKNOWN KEY `0x666f6f` (r:1 w:1) fn migrate_custom_top_fail() -> Weight { // Proof Size summary in bytes: // Measured: `113` - // Estimated: `3640` - // Minimum execution time: 59_288_000 picoseconds. - Weight::from_parts(60_276_000, 3640) + // Estimated: `3658` + // Minimum execution time: 59_819_000 picoseconds. + Weight::from_parts(61_066_000, 3658) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(175), added: 2650, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn migrate_custom_child_success() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `3640` - // Minimum execution time: 11_258_000 picoseconds. - Weight::from_parts(11_626_000, 3640) + // Estimated: `3658` + // Minimum execution time: 11_424_000 picoseconds. + Weight::from_parts(11_765_000, 3658) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(175), added: 2650, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x666f6f` (r:1 w:1) /// Proof: UNKNOWN KEY `0x666f6f` (r:1 w:1) fn migrate_custom_child_fail() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `3640` - // Minimum execution time: 61_575_000 picoseconds. - Weight::from_parts(63_454_000, 3640) + // Estimated: `3658` + // Minimum execution time: 61_086_000 picoseconds. + Weight::from_parts(62_546_000, 3658) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -229,10 +231,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `197 + v * (1 ±0)` // Estimated: `3662 + v * (1 ±0)` - // Minimum execution time: 5_259_000 picoseconds. 
- Weight::from_parts(5_433_000, 3662)
+ // Minimum execution time: 5_375_000 picoseconds.
+ Weight::from_parts(5_552_000, 3662)
// Standard Error: 1
- .saturating_add(Weight::from_parts(1_159, 0).saturating_mul(v.into()))
+ .saturating_add(Weight::from_parts(1_146, 0).saturating_mul(v.into()))
.saturating_add(RocksDbWeight::get().reads(1_u64))
.saturating_add(RocksDbWeight::get().writes(1_u64))
.saturating_add(Weight::from_parts(0, 1).saturating_mul(v.into()))
diff --git a/substrate/frame/sudo/src/benchmarking.rs b/substrate/frame/sudo/src/benchmarking.rs
index e64233fe7480a9d82c9c07709dd9b403544bff6a..cdd7d707600e1d2393e989add59f16349acb3492 100644
--- a/substrate/frame/sudo/src/benchmarking.rs
+++ b/substrate/frame/sudo/src/benchmarking.rs
@@ -20,14 +20,23 @@
use super::*;
use crate::Pallet;
use frame_benchmarking::v2::*;
+use frame_support::dispatch::DispatchInfo;
use frame_system::RawOrigin;
+use sp_runtime::traits::{AsSystemOriginSigner, DispatchTransaction, Dispatchable};
fn assert_last_event<T: Config>(generic_event: crate::Event<T>) {
let re: <T as Config>::RuntimeEvent = generic_event.into();
frame_system::Pallet::<T>::assert_last_event(re.into());
}
-#[benchmarks(where <T as Config>::RuntimeCall: From<frame_system::Call<T>>)]
+#[benchmarks(where
+ T: Send + Sync,
+ <T as Config>::RuntimeCall: From<frame_system::Call<T>>,
+ <T as Config>::RuntimeCall: Dispatchable<Info = DispatchInfo>,
+ <<T as Config>::RuntimeCall as Dispatchable>::PostInfo: From<()>,
+ <<T as Config>::RuntimeCall as Dispatchable>::RuntimeOrigin:
+ AsSystemOriginSigner<T::AccountId> + Clone,
+)]
mod benchmarks {
use super::*;
@@ -85,5 +94,23 @@ mod benchmarks {
assert_last_event::<T>(Event::KeyRemoved {});
}
+ #[benchmark]
+ fn check_only_sudo_account() {
+ let caller: T::AccountId = whitelisted_caller();
+ Key::<T>::put(&caller);
+
+ let call = frame_system::Call::remark { remark: vec![] }.into();
+ let info = DispatchInfo { ..Default::default() };
+ let ext = CheckOnlySudoAccount::<T>::new();
+
+ #[block]
+ {
+ assert!(ext
+ .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, |_| Ok(().into()))
+ .unwrap()
+ .is_ok());
+ }
+ }
+
impl_benchmark_test_suite!(Pallet, crate::mock::new_bench_ext(), crate::mock::Test);
}
diff --git a/substrate/frame/sudo/src/extension.rs b/substrate/frame/sudo/src/extension.rs
index e90286e5a7c6bae73794bae7efd5fda4a496bb25..2cd8f945eddb9e406ec16651cb436b6a60d3ade6 100644
--- a/substrate/frame/sudo/src/extension.rs
+++ b/substrate/frame/sudo/src/extension.rs
@@ -20,10 +20,14 @@
use codec::{Decode, Encode};
use frame_support::{dispatch::DispatchInfo, ensure};
use scale_info::TypeInfo;
use sp_runtime::{
- traits::{DispatchInfoOf, Dispatchable, SignedExtension},
+ impl_tx_ext_default,
+ traits::{
+ AsSystemOriginSigner, DispatchInfoOf, Dispatchable, TransactionExtension,
+ TransactionExtensionBase,
+ },
transaction_validity::{
- InvalidTransaction, TransactionPriority, TransactionValidity, TransactionValidityError,
- UnknownTransaction, ValidTransaction,
+ InvalidTransaction, TransactionPriority, TransactionValidityError, UnknownTransaction,
+ ValidTransaction,
},
};
use sp_std::{fmt, marker::PhantomData};
@@ -59,49 +63,61 @@ impl<T: Config + Send + Sync> fmt::Debug for CheckOnlySudoAccount<T> {
}
impl<T: Config + Send + Sync> CheckOnlySudoAccount<T> {
- /// Creates new `SignedExtension` to check sudo key.
+ /// Creates new `TransactionExtension` to check sudo key.
pub fn new() -> Self {
Self::default()
}
}
-impl<T: Config + Send + Sync> SignedExtension for CheckOnlySudoAccount<T>
-where
- <T as frame_system::Config>::RuntimeCall: Dispatchable<Info = DispatchInfo>,
-{
+impl<T: Config + Send + Sync> TransactionExtensionBase for CheckOnlySudoAccount<T> {
const IDENTIFIER: &'static str = "CheckOnlySudoAccount";
- type AccountId = T::AccountId;
- type Call = <T as frame_system::Config>::RuntimeCall;
- type AdditionalSigned = ();
- type Pre = ();
+ type Implicit = ();
- fn additional_signed(&self) -> Result<Self::AdditionalSigned, TransactionValidityError> {
- Ok(())
+ fn weight(&self) -> frame_support::weights::Weight {
+ use crate::weights::WeightInfo;
+ T::WeightInfo::check_only_sudo_account()
}
+}
+impl<T: Config + Send + Sync, Context>
+ TransactionExtension<<T as frame_system::Config>::RuntimeCall, Context> for CheckOnlySudoAccount<T>
+where
+ <T as frame_system::Config>::RuntimeCall: Dispatchable<Info = DispatchInfo>,
+ <<T as frame_system::Config>::RuntimeCall as Dispatchable>::RuntimeOrigin:
+ AsSystemOriginSigner<T::AccountId> + Clone,
+{
+ type Pre = ();
+ type Val = ();
fn validate(
&self,
- who: &Self::AccountId,
- _call: &Self::Call,
- info: &DispatchInfoOf<Self::Call>,
+ origin: <<T as frame_system::Config>::RuntimeCall as Dispatchable>::RuntimeOrigin,
+ _call: &<T as frame_system::Config>::RuntimeCall,
+ info: &DispatchInfoOf<<T as frame_system::Config>::RuntimeCall>,
_len: usize,
- ) -> TransactionValidity {
+ _context: &mut Context,
+ _self_implicit: Self::Implicit,
+ _inherited_implication: &impl Encode,
+ ) -> Result<
+ (
+ ValidTransaction,
+ Self::Val,
+ <<T as frame_system::Config>::RuntimeCall as Dispatchable>::RuntimeOrigin,
+ ),
+ TransactionValidityError,
+ > {
+ let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?;
let sudo_key: T::AccountId = Key::<T>::get().ok_or(UnknownTransaction::CannotLookup)?;
ensure!(*who == sudo_key, InvalidTransaction::BadSigner);
- Ok(ValidTransaction {
- priority: info.weight.ref_time() as TransactionPriority,
- ..Default::default()
- })
+ Ok((
+ ValidTransaction {
+ priority: info.weight.ref_time() as TransactionPriority,
+ ..Default::default()
+ },
+ (),
+ origin,
+ ))
}
- fn pre_dispatch(
- self,
- who: &Self::AccountId,
- call: &Self::Call,
- info: &DispatchInfoOf<Self::Call>,
- len: usize,
- ) -> Result<Self::Pre, TransactionValidityError> {
- self.validate(who, call, info, len).map(|_| ())
- }
+ impl_tx_ext_default!(<T as frame_system::Config>::RuntimeCall; Context; prepare);
}
diff --git a/substrate/frame/sudo/src/lib.rs b/substrate/frame/sudo/src/lib.rs
index 2ebe4cb015712dbf8df630a7114e6fc32c247e0d..2c953368b2215981320ba7cfd9957815c8f7a519 100644
--- a/substrate/frame/sudo/src/lib.rs
+++ b/substrate/frame/sudo/src/lib.rs
@@ -85,8 +85,8 @@
//! meant to be used by constructing runtime calls from outside the runtime.
//!
//!
-//! This pallet also defines a [`SignedExtension`](sp_runtime::traits::SignedExtension) called
-//! [`CheckOnlySudoAccount`] to ensure that only signed transactions by the sudo account are
+//! This pallet also defines a [`TransactionExtension`](sp_runtime::traits::TransactionExtension)
+//! called [`CheckOnlySudoAccount`] to ensure that only signed transactions by the sudo account are
//! accepted by the transaction pool. The intended use of this signed extension is to prevent other
//! accounts from spamming the transaction pool for the initial phase of a chain, during which
//! developers may only want a sudo account to be able to make transactions.
diff --git a/substrate/frame/sudo/src/weights.rs b/substrate/frame/sudo/src/weights.rs
index 10d1a9f7a513b7ae98947e27832842a42ab0c589..aa8f69fd4e6bfaa40a5ee791b9b1fabd47b71534 100644
--- a/substrate/frame/sudo/src/weights.rs
+++ b/substrate/frame/sudo/src/weights.rs
@@ -17,26 +17,28 @@
//! Autogenerated weights for `pallet_sudo`
//!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//!
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_sudo +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_sudo -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/sudo/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -53,6 +55,7 @@ pub trait WeightInfo { fn sudo() -> Weight; fn sudo_as() -> Weight; fn remove_key() -> Weight; + fn check_only_sudo_account() -> Weight; } /// Weights for `pallet_sudo` using the Substrate node and recommended hardware. @@ -64,8 +67,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 9_600_000 picoseconds. - Weight::from_parts(10_076_000, 1517) + // Minimum execution time: 9_590_000 picoseconds. + Weight::from_parts(9_924_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -75,8 +78,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 10_453_000 picoseconds. - Weight::from_parts(10_931_000, 1517) + // Minimum execution time: 9_825_000 picoseconds. + Weight::from_parts(10_373_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Sudo::Key` (r:1 w:0) @@ -85,8 +88,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 10_202_000 picoseconds. - Weight::from_parts(10_800_000, 1517) + // Minimum execution time: 10_140_000 picoseconds. + Weight::from_parts(10_382_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Sudo::Key` (r:1 w:1) @@ -95,11 +98,21 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 8_555_000 picoseconds. - Weight::from_parts(8_846_000, 1517) + // Minimum execution time: 8_610_000 picoseconds. + Weight::from_parts(8_975_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn check_only_sudo_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `165` + // Estimated: `1517` + // Minimum execution time: 3_416_000 picoseconds. + Weight::from_parts(3_645_000, 1517) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } } // For backwards compatibility and tests. 
@@ -110,8 +123,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 9_600_000 picoseconds. - Weight::from_parts(10_076_000, 1517) + // Minimum execution time: 9_590_000 picoseconds. + Weight::from_parts(9_924_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -121,8 +134,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 10_453_000 picoseconds. - Weight::from_parts(10_931_000, 1517) + // Minimum execution time: 9_825_000 picoseconds. + Weight::from_parts(10_373_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Sudo::Key` (r:1 w:0) @@ -131,8 +144,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 10_202_000 picoseconds. - Weight::from_parts(10_800_000, 1517) + // Minimum execution time: 10_140_000 picoseconds. + Weight::from_parts(10_382_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Sudo::Key` (r:1 w:1) @@ -141,9 +154,19 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 8_555_000 picoseconds. - Weight::from_parts(8_846_000, 1517) + // Minimum execution time: 8_610_000 picoseconds. + Weight::from_parts(8_975_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn check_only_sudo_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `165` + // Estimated: `1517` + // Minimum execution time: 3_416_000 picoseconds. 
+ Weight::from_parts(3_645_000, 1517) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } } diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index 72e2d0bfa56938f1353a196f2fbfc95a0e68b48d..113af41751ed3f69dd23bcfe25e2ccb3715585fc 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -18,13 +18,24 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = { version = "6.1", default-features = false } serde = { features = ["alloc", "derive"], workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } -sp-api = { path = "../../primitives/api", default-features = false, features = ["frame-metadata"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ + "derive", + "max-encoded-len", +] } +scale-info = { version = "2.10.0", default-features = false, features = [ + "derive", +] } +frame-metadata = { version = "16.0.0", default-features = false, features = [ + "current", +] } +sp-api = { path = "../../primitives/api", default-features = false, features = [ + "frame-metadata", +] } sp-std = { path = "../../primitives/std", default-features = false } sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } +sp-runtime = { path = "../../primitives/runtime", default-features = false, features = [ + "serde", +] } sp-tracing = { path = "../../primitives/tracing", default-features = false } sp-core = { path = "../../primitives/core", default-features = false } sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } @@ -55,6 +66,7 @@ aquamarine = { version = "0.5.0" } [dev-dependencies] assert_matches = "1.3.0" pretty_assertions = "1.2.1" +sp-timestamp = { path = "../../primitives/timestamp", default-features = false } frame-system = { path = "../system" } sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } @@ -83,6 +95,7 @@ std = [ "sp-staking/std", "sp-state-machine/std", "sp-std/std", + "sp-timestamp/std", "sp-tracing/std", "sp-weights/std", ] diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/config.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/config.rs index 5613047359a72ca828514ef6e492fadc87611128..dbbe6ba6e6c32ec09a8514033108617883075243 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/config.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/config.rs @@ -76,10 +76,6 @@ pub fn expand_outer_config( #fields } - #[cfg(any(feature = "std", test))] - #[deprecated(note = "GenesisConfig is planned to be removed in December 2023. 
Use `RuntimeGenesisConfig` instead.")] - pub type GenesisConfig = RuntimeGenesisConfig; - #[cfg(any(feature = "std", test))] impl #scrate::sp_runtime::BuildStorage for RuntimeGenesisConfig { fn assimilate_storage( diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs index 34b9d21d8ce84fafb7958278227a3f572d8d2f74..d21cfef4a927c8033c393719247d9c5a09d83e1a 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs @@ -73,11 +73,9 @@ pub fn expand_outer_inherent( #( #pallet_attrs if let Some(inherent) = #pallet_names::create_inherent(self) { - let inherent = <#unchecked_extrinsic as #scrate::sp_runtime::traits::Extrinsic>::new( + let inherent = <#unchecked_extrinsic as #scrate::sp_runtime::traits::Extrinsic>::new_inherent( inherent.into(), - None, - ).expect("Runtime UncheckedExtrinsic is not Opaque, so it has to return \ - `Some`; qed"); + ); inherents.push(inherent); } @@ -123,7 +121,7 @@ pub fn expand_outer_inherent( for xt in block.extrinsics() { // Inherents are before any other extrinsics. // And signed extrinsics are not inherents. - if #scrate::sp_runtime::traits::Extrinsic::is_signed(xt).unwrap_or(false) { + if !(#scrate::sp_runtime::traits::Extrinsic::is_bare(xt)) { break } @@ -161,10 +159,9 @@ pub fn expand_outer_inherent( match #pallet_names::is_inherent_required(self) { Ok(Some(e)) => { let found = block.extrinsics().iter().any(|xt| { - let is_signed = #scrate::sp_runtime::traits::Extrinsic::is_signed(xt) - .unwrap_or(false); + let is_bare = #scrate::sp_runtime::traits::Extrinsic::is_bare(xt); - if !is_signed { + if is_bare { let call = < #unchecked_extrinsic as ExtrinsicCall >::call(xt); @@ -204,47 +201,51 @@ pub fn expand_outer_inherent( } } + impl #scrate::traits::IsInherent<<#block as #scrate::sp_runtime::traits::Block>::Extrinsic> for #runtime { + fn is_inherent(ext: &<#block as #scrate::sp_runtime::traits::Block>::Extrinsic) -> bool { + use #scrate::inherent::ProvideInherent; + use #scrate::traits::{IsSubType, ExtrinsicCall}; + + let is_bare = #scrate::sp_runtime::traits::Extrinsic::is_bare(ext); + if !is_bare { + // Signed extrinsics are not inherents. + return false + } + + #( + #pallet_attrs + { + let call = <#unchecked_extrinsic as ExtrinsicCall>::call(ext); + if let Some(call) = IsSubType::<_>::is_sub_type(call) { + if <#pallet_names as ProvideInherent>::is_inherent(&call) { + return true; + } + } + } + )* + false + } + } + impl #scrate::traits::EnsureInherentsAreFirst<#block> for #runtime { - fn ensure_inherents_are_first(block: &#block) -> Result<(), u32> { + fn ensure_inherents_are_first(block: &#block) -> Result { use #scrate::inherent::ProvideInherent; use #scrate::traits::{IsSubType, ExtrinsicCall}; use #scrate::sp_runtime::traits::Block as _; - let mut first_signed_observed = false; + let mut num_inherents = 0u32; for (i, xt) in block.extrinsics().iter().enumerate() { - let is_signed = #scrate::sp_runtime::traits::Extrinsic::is_signed(xt) - .unwrap_or(false); - - let is_inherent = if is_signed { - // Signed extrinsics are not inherents. 
- false - } else { - let mut is_inherent = false; - #( - #pallet_attrs - { - let call = <#unchecked_extrinsic as ExtrinsicCall>::call(xt); - if let Some(call) = IsSubType::<_>::is_sub_type(call) { - if #pallet_names::is_inherent(&call) { - is_inherent = true; - } - } - } - )* - is_inherent - }; - - if !is_inherent { - first_signed_observed = true; - } + if >::is_inherent(xt) { + if num_inherents != i as u32 { + return Err(i as u32); + } - if first_signed_observed && is_inherent { - return Err(i as u32) + num_inherents += 1; // Safe since we are in an `enumerate` loop. } } - Ok(()) + Ok(num_inherents) } } } diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs index 0e76f9a92469a73d3a616da454dc46ded5034afc..f55ca677d89c372261a885572d48b36f395d1a5b 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs @@ -119,13 +119,13 @@ pub fn expand_runtime_metadata( call_ty, signature_ty, extra_ty, - signed_extensions: < + extensions: < < #extrinsic as #scrate::sp_runtime::traits::ExtrinsicMetadata - >::SignedExtensions as #scrate::sp_runtime::traits::SignedExtension + >::Extra as #scrate::sp_runtime::traits::TransactionExtensionBase >::metadata() .into_iter() - .map(|meta| #scrate::__private::metadata_ir::SignedExtensionMetadataIR { + .map(|meta| #scrate::__private::metadata_ir::TransactionExtensionMetadataIR { identifier: meta.identifier, ty: meta.ty, additional_signed: meta.additional_signed, diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs index b421d2aaffabe077450d81e3396976f3067b264a..341621deb098c1905557bd953d750283dc45ae51 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -104,7 +104,7 @@ pub fn expand_outer_origin( #[doc = #doc_string] #[derive(Clone)] pub struct RuntimeOrigin { - caller: OriginCaller, + pub caller: OriginCaller, filter: #scrate::__private::sp_std::rc::Rc::RuntimeCall) -> bool>>, } @@ -153,6 +153,10 @@ pub fn expand_outer_origin( self.filter = #scrate::__private::sp_std::rc::Rc::new(Box::new(filter)); } + fn set_caller(&mut self, caller: OriginCaller) { + self.caller = caller; + } + fn set_caller_from(&mut self, other: impl Into) { self.caller = other.into().caller; } @@ -301,6 +305,16 @@ pub fn expand_outer_origin( } } + impl #scrate::__private::AsSystemOriginSigner<<#runtime as #system_path::Config>::AccountId> for RuntimeOrigin { + fn as_system_origin_signer(&self) -> Option<&<#runtime as #system_path::Config>::AccountId> { + if let OriginCaller::system(#system_path::Origin::<#runtime>::Signed(ref signed)) = &self.caller { + Some(signed) + } else { + None + } + } + } + #pallet_conversions }) } diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs index 54ed15f7b1d36d6ee5eb724381152af03bef60c8..6e95fdf116a6fefe92e3fa94f91f4424013aa35f 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs @@ -233,25 +233,38 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream { let input_copy = input.clone(); let definition = 
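For reference, the rewritten `ensure_inherents_are_first` in the inherent expansion above now returns the number of leading inherents on success and the index of the first misplaced inherent on failure, relying on the new `IsInherent` implementation instead of `is_signed`. A standalone sketch of that ordering rule (the `is_inherent` closure stands in for the generated trait impl; everything else here is illustrative only):

```rust
/// Returns `Ok(number_of_leading_inherents)` if every inherent sits in the initial,
/// contiguous run of extrinsics, otherwise `Err(index_of_first_misplaced_inherent)`.
fn ensure_inherents_are_first<X>(
	extrinsics: &[X],
	is_inherent: impl Fn(&X) -> bool,
) -> Result<u32, u32> {
	let mut num_inherents = 0u32;
	for (i, xt) in extrinsics.iter().enumerate() {
		if is_inherent(xt) {
			if num_inherents != i as u32 {
				// An inherent appears after the first non-inherent extrinsic.
				return Err(i as u32);
			}
			num_inherents += 1;
		}
	}
	Ok(num_inherents)
}

fn main() {
	// 'I' = inherent, 'S' = signed or otherwise non-inherent extrinsic.
	let is_inherent = |x: &char| *x == 'I';
	assert_eq!(ensure_inherents_are_first(&['I', 'I', 'S'], is_inherent), Ok(2));
	assert_eq!(ensure_inherents_are_first(&['I', 'S', 'I'], is_inherent), Err(2));
}
```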
syn::parse_macro_input!(input as RuntimeDeclaration); - let res = match definition { - RuntimeDeclaration::Implicit(implicit_def) => - check_pallet_number(input_copy.clone().into(), implicit_def.pallets.len()).and_then( - |_| construct_runtime_implicit_to_explicit(input_copy.into(), implicit_def), - ), - RuntimeDeclaration::Explicit(explicit_decl) => check_pallet_number( - input_copy.clone().into(), - explicit_decl.pallets.len(), - ) - .and_then(|_| { - construct_runtime_explicit_to_explicit_expanded(input_copy.into(), explicit_decl) - }), - RuntimeDeclaration::ExplicitExpanded(explicit_decl) => - check_pallet_number(input_copy.into(), explicit_decl.pallets.len()) - .and_then(|_| construct_runtime_final_expansion(explicit_decl)), + let (check_pallet_number_res, res) = match definition { + RuntimeDeclaration::Implicit(implicit_def) => ( + check_pallet_number(input_copy.clone().into(), implicit_def.pallets.len()), + construct_runtime_implicit_to_explicit(input_copy.into(), implicit_def), + ), + RuntimeDeclaration::Explicit(explicit_decl) => ( + check_pallet_number(input_copy.clone().into(), explicit_decl.pallets.len()), + construct_runtime_explicit_to_explicit_expanded(input_copy.into(), explicit_decl), + ), + RuntimeDeclaration::ExplicitExpanded(explicit_decl) => ( + check_pallet_number(input_copy.into(), explicit_decl.pallets.len()), + construct_runtime_final_expansion(explicit_decl), + ), }; let res = res.unwrap_or_else(|e| e.to_compile_error()); + // We want to provide better error messages to the user and thus, handle the error here + // separately. If there is an error, we print the error and still generate all of the code to + // get in overall less errors for the user. + let res = if let Err(error) = check_pallet_number_res { + let error = error.to_compile_error(); + + quote! { + #error + + #res + } + } else { + res + }; + let res = expander::Expander::new("construct_runtime") .dry(std::env::var("EXPAND_MACROS").is_err()) .verbose(true) diff --git a/substrate/frame/support/procedural/src/derive_impl.rs b/substrate/frame/support/procedural/src/derive_impl.rs index d6d5bf68efd5689af2e96250e24cef3cd7faf47b..8740ccd401abedcbcb27e19c77ef3e885207342a 100644 --- a/substrate/frame/support/procedural/src/derive_impl.rs +++ b/substrate/frame/support/procedural/src/derive_impl.rs @@ -172,6 +172,33 @@ fn combine_impls( final_impl } +/// Computes the disambiguation path for the `derive_impl` attribute macro. +/// +/// When specified explicitly using `as [disambiguation_path]` in the macro attr, the +/// disambiguation is used as is. If not, we infer the disambiguation path from the +/// `foreign_impl_path` and the computed scope. +fn compute_disambiguation_path( + disambiguation_path: Option, + foreign_impl: ItemImpl, + default_impl_path: Path, +) -> Result { + match (disambiguation_path, foreign_impl.clone().trait_) { + (Some(disambiguation_path), _) => Ok(disambiguation_path), + (None, Some((_, foreign_impl_path, _))) => + if default_impl_path.segments.len() > 1 { + let scope = default_impl_path.segments.first(); + Ok(parse_quote!(#scope :: #foreign_impl_path)) + } else { + Ok(foreign_impl_path) + }, + _ => Err(syn::Error::new( + default_impl_path.span(), + "Impl statement must have a defined type being implemented \ + for a defined type such as `impl A for B`", + )), + } +} + /// Internal implementation behind [`#[derive_impl(..)]`](`macro@crate::derive_impl`). 
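The `construct_runtime` change above decouples the pallet-count check from the expansion so that, when the check fails, the macro emits the error together with the generated code and the user is not buried in follow-on resolution errors. The same pattern shown in isolation (a sketch only; it assumes the `proc-macro2`, `quote` and `syn` crates as dependencies):

```rust
use proc_macro2::TokenStream;
use quote::quote;

/// Combine a validation result with already-generated output: on failure, emit the
/// compile error *and* the generated items, so downstream code still resolves as far
/// as possible and only the real problem is reported.
fn emit_with_error(check: Result<(), syn::Error>, generated: TokenStream) -> TokenStream {
	match check {
		Ok(()) => generated,
		Err(err) => {
			let err = err.to_compile_error();
			quote! {
				#err
				#generated
			}
		},
	}
}
```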
/// /// `default_impl_path`: the module path of the external `impl` statement whose tokens we are @@ -194,18 +221,11 @@ pub fn derive_impl( let foreign_impl = parse2::(foreign_tokens)?; let default_impl_path = parse2::(default_impl_path)?; - // have disambiguation_path default to the item being impl'd in the foreign impl if we - // don't specify an `as [disambiguation_path]` in the macro attr - let disambiguation_path = match (disambiguation_path, foreign_impl.clone().trait_) { - (Some(disambiguation_path), _) => disambiguation_path, - (None, Some((_, foreign_impl_path, _))) => foreign_impl_path, - _ => - return Err(syn::Error::new( - foreign_impl.span(), - "Impl statement must have a defined type being implemented \ - for a defined type such as `impl A for B`", - )), - }; + let disambiguation_path = compute_disambiguation_path( + disambiguation_path, + foreign_impl.clone(), + default_impl_path.clone(), + )?; // generate the combined impl let combined_impl = combine_impls( @@ -257,3 +277,27 @@ fn test_runtime_type_with_doc() { } } } + +#[test] +fn test_disambugation_path() { + let foreign_impl: ItemImpl = parse_quote!(impl SomeTrait for SomeType {}); + let default_impl_path: Path = parse_quote!(SomeScope::SomeType); + + // disambiguation path is specified + let disambiguation_path = compute_disambiguation_path( + Some(parse_quote!(SomeScope::SomePath)), + foreign_impl.clone(), + default_impl_path.clone(), + ); + assert_eq!(disambiguation_path.unwrap(), parse_quote!(SomeScope::SomePath)); + + // disambiguation path is not specified and the default_impl_path has more than one segment + let disambiguation_path = + compute_disambiguation_path(None, foreign_impl.clone(), default_impl_path.clone()); + assert_eq!(disambiguation_path.unwrap(), parse_quote!(SomeScope::SomeTrait)); + + // disambiguation path is not specified and the default_impl_path has only one segment + let disambiguation_path = + compute_disambiguation_path(None, foreign_impl.clone(), parse_quote!(SomeType)); + assert_eq!(disambiguation_path.unwrap(), parse_quote!(SomeTrait)); +} diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index 20b8d74310f3e4aca7a5cae61fe3e5d72ec0626c..036da52a7badc4c31c28db364c819bef29bb1f62 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -188,133 +188,11 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream { construct_runtime::construct_runtime(input) } -/// The pallet struct placeholder `#[pallet::pallet]` is mandatory and allows you to specify -/// pallet information. /// -/// The struct must be defined as follows: -/// ```ignore -/// #[pallet::pallet] -/// pub struct Pallet(_); -/// ``` -/// I.e. a regular struct definition named `Pallet`, with generic T and no where clause. -/// -/// ## Macro expansion: -/// -/// The macro adds this attribute to the struct definition: -/// ```ignore -/// #[derive( -/// frame_support::CloneNoBound, -/// frame_support::EqNoBound, -/// frame_support::PartialEqNoBound, -/// frame_support::RuntimeDebugNoBound, -/// )] -/// ``` -/// and replaces the type `_` with `PhantomData`. It also implements on the pallet: -/// * `GetStorageVersion` -/// * `OnGenesis`: contains some logic to write the pallet version into storage. -/// * `PalletErrorTypeInfo`: provides the type information for the pallet error, if defined. -/// -/// It declares `type Module` type alias for `Pallet`, used by `construct_runtime`. 
-/// -/// It implements `PalletInfoAccess` on `Pallet` to ease access to pallet information given by -/// `frame_support::traits::PalletInfo`. (The implementation uses the associated type -/// `frame_system::Config::PalletInfo`). -/// -/// It implements `StorageInfoTrait` on `Pallet` which give information about all storages. -/// -/// If the attribute `generate_store` is set then the macro creates the trait `Store` and -/// implements it on `Pallet`. -/// -/// If the attribute `set_storage_max_encoded_len` is set then the macro calls -/// `StorageInfoTrait` for each storage in the implementation of `StorageInfoTrait` for the -/// pallet. Otherwise it implements `StorageInfoTrait` for the pallet using the -/// `PartialStorageInfoTrait` implementation of storages. -/// -/// ## Dev Mode (`#[pallet(dev_mode)]`) -/// -/// Specifying the argument `dev_mode` will allow you to enable dev mode for a pallet. The aim -/// of dev mode is to loosen some of the restrictions and requirements placed on production -/// pallets for easy tinkering and development. Dev mode pallets should not be used in -/// production. Enabling dev mode has the following effects: -/// -/// * Weights no longer need to be specified on every `#[pallet::call]` declaration. By default, dev -/// mode pallets will assume a weight of zero (`0`) if a weight is not specified. This is -/// equivalent to specifying `#[weight(0)]` on all calls that do not specify a weight. -/// * Call indices no longer need to be specified on every `#[pallet::call]` declaration. By -/// default, dev mode pallets will assume a call index based on the order of the call. -/// * All storages are marked as unbounded, meaning you do not need to implement `MaxEncodedLen` on -/// storage types. This is equivalent to specifying `#[pallet::unbounded]` on all storage type -/// definitions. -/// * Storage hashers no longer need to be specified and can be replaced by `_`. In dev mode, these -/// will be replaced by `Blake2_128Concat`. In case of explicit key-binding, `Hasher` can simply -/// be ignored when in `dev_mode`. -/// -/// Note that the `dev_mode` argument can only be supplied to the `#[pallet]` or -/// `#[frame_support::pallet]` attribute macro that encloses your pallet module. This argument -/// cannot be specified anywhere else, including but not limited to the `#[pallet::pallet]` -/// attribute macro. -/// -///
-/// WARNING:
-/// You should not deploy or use dev mode pallets in production. Doing so can break your chain
-/// and therefore should never be done. Once you are done tinkering, you should remove the
-/// 'dev_mode' argument from your #[pallet] declaration and fix any compile errors before
-/// attempting to use your pallet in a production scenario.
-/// 
-/// -/// See `frame_support::pallet` docs for more info. -/// -/// ## Runtime Metadata Documentation -/// -/// The documentation added to this pallet is included in the runtime metadata. -/// -/// The documentation can be defined in the following ways: -/// -/// ```ignore -/// #[pallet::pallet] -/// /// Documentation for pallet 1 -/// #[doc = "Documentation for pallet 2"] -/// #[doc = include_str!("../README.md")] -/// #[pallet_doc("../doc1.md")] -/// #[pallet_doc("../doc2.md")] -/// pub mod pallet {} -/// ``` -/// -/// The runtime metadata for this pallet contains the following -/// - " Documentation for pallet 1" (captured from `///`) -/// - "Documentation for pallet 2" (captured from `#[doc]`) -/// - content of ../README.md (captured from `#[doc]` with `include_str!`) -/// - content of "../doc1.md" (captured from `pallet_doc`) -/// - content of "../doc2.md" (captured from `pallet_doc`) -/// -/// ### `doc` attribute -/// -/// The value of the `doc` attribute is included in the runtime metadata, as well as -/// expanded on the pallet module. The previous example is expanded to: -/// -/// ```ignore -/// /// Documentation for pallet 1 -/// /// Documentation for pallet 2 -/// /// Content of README.md -/// pub mod pallet {} -/// ``` -/// -/// If you want to specify the file from which the documentation is loaded, you can use the -/// `include_str` macro. However, if you only want the documentation to be included in the -/// runtime metadata, use the `pallet_doc` attribute. -/// -/// ### `pallet_doc` attribute -/// -/// Unlike the `doc` attribute, the documentation provided to the `pallet_doc` attribute is -/// not inserted on the module. -/// -/// The `pallet_doc` attribute can only be provided with one argument, -/// which is the file path that holds the documentation to be added to the metadata. +/// --- /// -/// This approach is beneficial when you use the `include_str` macro at the beginning of the file -/// and want that documentation to extend to the runtime metadata, without reiterating the -/// documentation on the pallet module itself. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet`. #[proc_macro_attribute] pub fn pallet(attr: TokenStream, item: TokenStream) -> TokenStream { pallet::pallet(attr, item) @@ -408,19 +286,28 @@ pub fn transactional(attr: TokenStream, input: TokenStream) -> TokenStream { transactional::transactional(attr, input).unwrap_or_else(|e| e.to_compile_error().into()) } +/// +/// --- +/// +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::require_transactional`. #[proc_macro_attribute] pub fn require_transactional(attr: TokenStream, input: TokenStream) -> TokenStream { transactional::require_transactional(attr, input) .unwrap_or_else(|e| e.to_compile_error().into()) } -/// Derive [`Clone`] but do not bound any generic. Docs are at `frame_support::CloneNoBound`. +/// Derive [`Clone`] but do not bound any generic. +/// +/// Docs at `frame_support::CloneNoBound`. #[proc_macro_derive(CloneNoBound)] pub fn derive_clone_no_bound(input: TokenStream) -> TokenStream { no_bound::clone::derive_clone_no_bound(input) } -/// Derive [`Debug`] but do not bound any generics. Docs are at `frame_support::DebugNoBound`. +/// Derive [`Debug`] but do not bound any generics. +/// +/// Docs at `frame_support::DebugNoBound`. 
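The `*NoBound` derive docs touched above ("do not bound any generic") are easiest to read with an example. A sketch, assuming `frame_support` as a dependency (the `Config` trait and `Deposit` struct here are made up): the derives only require the field types, not the generic parameter itself, to satisfy the traits.

```rust
use core::marker::PhantomData;
use frame_support::{CloneNoBound, DebugNoBound, EqNoBound, PartialEqNoBound};

pub trait Config {
	type Balance: Clone + PartialEq + Eq + core::fmt::Debug;
}

// A plain `#[derive(Clone, Debug, PartialEq, Eq)]` would add `T: Clone` etc. to the
// generated impls even though only `T::Balance` is stored. The NoBound derives skip
// those bounds on `T` and rely on the field types alone.
#[derive(CloneNoBound, DebugNoBound, PartialEqNoBound, EqNoBound)]
pub struct Deposit<T: Config> {
	pub amount: T::Balance,
	pub _marker: PhantomData<T>,
}
```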
#[proc_macro_derive(DebugNoBound)] pub fn derive_debug_no_bound(input: TokenStream) -> TokenStream { no_bound::debug::derive_debug_no_bound(input) @@ -452,14 +339,17 @@ pub fn derive_runtime_debug_no_bound(input: TokenStream) -> TokenStream { } } -/// Derive [`PartialEq`] but do not bound any generic. Docs are at -/// `frame_support::PartialEqNoBound`. +/// Derive [`PartialEq`] but do not bound any generic. +/// +/// Docs at `frame_support::PartialEqNoBound`. #[proc_macro_derive(PartialEqNoBound)] pub fn derive_partial_eq_no_bound(input: TokenStream) -> TokenStream { no_bound::partial_eq::derive_partial_eq_no_bound(input) } -/// Derive [`Eq`] but do no bound any generic. Docs are at `frame_support::EqNoBound`. +/// DeriveEq but do no bound any generic. +/// +/// Docs at `frame_support::EqNoBound`. #[proc_macro_derive(EqNoBound)] pub fn derive_eq_no_bound(input: TokenStream) -> TokenStream { let input = syn::parse_macro_input!(input as syn::DeriveInput); @@ -494,6 +384,7 @@ pub fn derive_default_no_bound(input: TokenStream) -> TokenStream { no_bound::default::derive_default_no_bound(input) } +/// Macro used internally in FRAME to generate the crate version for a pallet. #[proc_macro] pub fn crate_to_crate_version(input: TokenStream) -> TokenStream { crate_version::crate_to_crate_version(input) @@ -555,6 +446,11 @@ pub fn __create_tt_macro(input: TokenStream) -> TokenStream { tt_macro::create_tt_return_macro(input) } +/// +/// --- +/// +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::storage_alias`. #[proc_macro_attribute] pub fn storage_alias(attributes: TokenStream, input: TokenStream) -> TokenStream { storage_alias::storage_alias(attributes.into(), input.into()) @@ -603,6 +499,11 @@ pub fn storage_alias(attributes: TokenStream, input: TokenStream) -> TokenStream /// default to `A` from the `impl A for B` part of the default impl. This is useful for scenarios /// where all of the relevant types are already in scope via `use` statements. /// +/// In case the `default_impl_path` is scoped to a different module such as +/// `some::path::TestTraitImpl`, the same scope is assumed for the `disambiguation_path`, i.e. +/// `some::A`. This enables the use of `derive_impl` attribute without having to specify the +/// `disambiguation_path` in most (if not all) uses within FRAME's context. +/// /// Conversely, the `default_impl_path` argument is required and cannot be omitted. /// /// Optionally, `no_aggregated_types` can be specified as follows: @@ -785,24 +686,21 @@ pub fn derive_impl(attrs: TokenStream, input: TokenStream) -> TokenStream { .into() } -/// The optional attribute `#[pallet::no_default]` can be attached to trait items within a -/// `Config` trait impl that has [`#[pallet::config(with_default)]`](`macro@config`) attached. /// -/// Attaching this attribute to a trait item ensures that that trait item will not be used as a -/// default with the [`#[derive_impl(..)]`](`macro@derive_impl`) attribute macro. +/// --- +/// +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::no_default`. #[proc_macro_attribute] pub fn no_default(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The optional attribute `#[pallet::no_default_bounds]` can be attached to trait items within a -/// `Config` trait impl that has [`#[pallet::config(with_default)]`](`macro@config`) attached. 
/// -/// Attaching this attribute to a trait item ensures that the generated trait `DefaultConfig` -/// will not have any bounds for this trait item. +/// --- /// -/// As an example, if you have a trait item `type AccountId: SomeTrait;` in your `Config` trait, -/// the generated `DefaultConfig` will only have `type AccountId;` with no trait bound. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::no_default_bounds`. #[proc_macro_attribute] pub fn no_default_bounds(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -882,6 +780,11 @@ pub fn register_default_impl(attrs: TokenStream, tokens: TokenStream) -> TokenSt } } +/// +/// --- +/// +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_prelude::inject_runtime_type`. #[proc_macro_attribute] pub fn inject_runtime_type(_: TokenStream, tokens: TokenStream) -> TokenStream { let item = tokens.clone(); @@ -915,75 +818,11 @@ fn pallet_macro_stub() -> TokenStream { .into() } -/// The mandatory attribute `#[pallet::config]` defines the configurable options for the pallet. -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::config] -/// pub trait Config: frame_system::Config + $optionally_some_other_supertraits -/// $optional_where_clause -/// { -/// ... -/// } -/// ``` -/// -/// I.e. a regular trait definition named `Config`, with the supertrait -/// `frame_system::pallet::Config`, and optionally other supertraits and a where clause. -/// (Specifying other supertraits here is known as [tight -/// coupling](https://docs.substrate.io/reference/how-to-guides/pallet-design/use-tight-coupling/)) -/// -/// The associated type `RuntimeEvent` is reserved. If defined, it must have the bounds -/// `From` and `IsType<::RuntimeEvent>`. -/// -/// [`pallet::event`](`macro@event`) must be present if `RuntimeEvent` exists as a config item -/// in your `#[pallet::config]`. -/// -/// ## Optional: `with_default` -/// -/// An optional `with_default` argument may also be specified. Doing so will automatically -/// generate a `DefaultConfig` trait inside your pallet which is suitable for use with -/// [`[#[derive_impl(..)]`](`macro@derive_impl`) to derive a default testing config: -/// -/// ```ignore -/// #[pallet::config(with_default)] -/// pub trait Config: frame_system::Config { -/// type RuntimeEvent: Parameter -/// + Member -/// + From> -/// + Debug -/// + IsType<::RuntimeEvent>; -/// -/// #[pallet::no_default] -/// type BaseCallFilter: Contains; -/// // ... -/// } -/// ``` -/// -/// As shown above, you may also attach the [`#[pallet::no_default]`](`macro@no_default`) -/// attribute to specify that a particular trait item _cannot_ be used as a default when a test -/// `Config` is derived using the [`#[derive_impl(..)]`](`macro@derive_impl`) attribute macro. -/// This will cause that particular trait item to simply not appear in default testing configs -/// based on this config (the trait item will not be included in `DefaultConfig`). -/// -/// ### `DefaultConfig` Caveats /// -/// The auto-generated `DefaultConfig` trait: -/// - is always a _subset_ of your pallet's `Config` trait. -/// - can only contain items that don't rely on externalities, such as `frame_system::Config`. 
-/// -/// Trait items that _do_ rely on externalities should be marked with -/// [`#[pallet::no_default]`](`macro@no_default`) -/// -/// Consequently: -/// - Any items that rely on externalities _must_ be marked with -/// [`#[pallet::no_default]`](`macro@no_default`) or your trait will fail to compile when used -/// with [`derive_impl`](`macro@derive_impl`). -/// - Items marked with [`#[pallet::no_default]`](`macro@no_default`) are entirely excluded from the -/// `DefaultConfig` trait, and therefore any impl of `DefaultConfig` doesn't need to implement -/// such items. +/// --- /// -/// For more information, see [`macro@derive_impl`]. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::config`. #[proc_macro_attribute] pub fn config(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -992,7 +831,7 @@ pub fn config(_: TokenStream, _: TokenStream) -> TokenStream { /// /// --- /// -/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// Rust-Analyzer Users: Documentation for this macro can be found at /// `frame_support::pallet_macros::constant`. #[proc_macro_attribute] pub fn constant(_: TokenStream, _: TokenStream) -> TokenStream { @@ -1002,106 +841,58 @@ pub fn constant(_: TokenStream, _: TokenStream) -> TokenStream { /// /// --- /// -/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// Rust-Analyzer Users: Documentation for this macro can be found at /// `frame_support::pallet_macros::constant_name`. #[proc_macro_attribute] pub fn constant_name(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// To bypass the `frame_system::Config` supertrait check, use the attribute -/// `pallet::disable_frame_system_supertrait_check`, e.g.: /// -/// ```ignore -/// #[pallet::config] -/// #[pallet::disable_frame_system_supertrait_check] -/// pub trait Config: pallet_timestamp::Config {} -/// ``` +/// --- /// -/// NOTE: Bypassing the `frame_system::Config` supertrait check is typically desirable when you -/// want to write an alternative to the `frame_system` pallet. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::disable_frame_system_supertrait_check`. #[proc_macro_attribute] pub fn disable_frame_system_supertrait_check(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// To generate a `Store` trait associating all storages, annotate your `Pallet` struct with -/// the attribute `#[pallet::generate_store($vis trait Store)]`, e.g.: -/// -/// ```ignore -/// #[pallet::pallet] -/// #[pallet::generate_store(pub(super) trait Store)] -/// pub struct Pallet(_); -/// ``` -/// More precisely, the `Store` trait contains an associated type for each storage. It is -/// implemented for `Pallet` allowing access to the storage from pallet struct. /// -/// Thus when defining a storage named `Foo`, it can later be accessed from `Pallet` using -/// `::Foo`. +/// --- /// -/// NOTE: this attribute is only valid when applied _directly_ to your `Pallet` struct -/// definition. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::generate_store`. #[proc_macro_attribute] pub fn generate_store(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// Because the `pallet::pallet` macro implements `GetStorageVersion`, the current storage -/// version needs to be communicated to the macro. 
This can be done by using the -/// `pallet::storage_version` attribute: /// -/// ```ignore -/// const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); -/// -/// #[pallet::pallet] -/// #[pallet::storage_version(STORAGE_VERSION)] -/// pub struct Pallet(_); -/// ``` +/// --- /// -/// If not present, the current storage version is set to the default value. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::storage_version`. #[proc_macro_attribute] pub fn storage_version(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::hooks]` attribute allows you to specify a `Hooks` implementation for -/// `Pallet` that specifies pallet-specific logic. -/// -/// The item the attribute attaches to must be defined as follows: -/// ```ignore -/// #[pallet::hooks] -/// impl Hooks> for Pallet $optional_where_clause { -/// ... -/// } -/// ``` -/// I.e. a regular trait implementation with generic bound: `T: Config`, for the trait -/// `Hooks>` (they are defined in preludes), for the type `Pallet` and -/// with an optional where clause. -/// -/// If no `#[pallet::hooks]` exists, then the following default implementation is -/// automatically generated: -/// ```ignore -/// #[pallet::hooks] -/// impl Hooks> for Pallet {} -/// ``` -/// -/// ## Macro expansion -/// -/// The macro implements the traits `OnInitialize`, `OnIdle`, `OnFinalize`, `OnRuntimeUpgrade`, -/// `OffchainWorker`, and `IntegrityTest` using the provided `Hooks` implementation. /// -/// NOTE: `OnRuntimeUpgrade` is implemented with `Hooks::on_runtime_upgrade` and some -/// additional logic. E.g. logic to write the pallet version into storage. +/// --- /// -/// NOTE: The macro also adds some tracing logic when implementing the above traits. The -/// following hooks emit traces: `on_initialize`, `on_finalize` and `on_runtime_upgrade`. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::hooks`. #[proc_macro_attribute] pub fn hooks(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// Each dispatchable needs to define a weight with `#[pallet::weight($expr)]` attribute, the -/// first argument must be `origin: OriginFor`. +/// +/// --- +/// +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::weight`. #[proc_macro_attribute] pub fn weight(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -1110,8 +901,8 @@ pub fn weight(_: TokenStream, _: TokenStream) -> TokenStream { /// /// --- /// -/// **Rust-Analyzer users**: See the documentation of the Rust item in -/// [`frame_support::pallet_macros::call`](../../frame_support/pallet_macros/attr.call.html). +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::compact`. #[proc_macro_attribute] pub fn compact(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -1120,8 +911,8 @@ pub fn compact(_: TokenStream, _: TokenStream) -> TokenStream { /// /// --- /// -/// **Rust-Analyzer users**: See the documentation of the Rust item in -/// [`frame_support::pallet_macros::call`](../../frame_support/pallet_macros/attr.call.html). +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::call`. 
#[proc_macro_attribute] pub fn call(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -1132,164 +923,60 @@ pub fn call(_: TokenStream, _: TokenStream) -> TokenStream { /// /// --- /// -/// **Rust-Analyzer users**: See the documentation of the Rust item in -/// [`frame_support::pallet_macros::call`](../../frame_support/pallet_macros/attr.call.html). +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::call_index`. #[proc_macro_attribute] pub fn call_index(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// Each dispatchable may be annotated with the `#[pallet::feeless_if($closure)]` attribute, -/// which explicitly defines the condition for the dispatchable to be feeless. -/// -/// The arguments for the closure must be the referenced arguments of the dispatchable function. -/// -/// The closure must return `bool`. -/// -/// ### Example -/// ```ignore -/// #[pallet::feeless_if(|_origin: &OriginFor, something: &u32| -> bool { -/// *something == 0 -/// })] -/// pub fn do_something(origin: OriginFor, something: u32) -> DispatchResult { -/// .... -/// } -/// ``` /// -/// Please note that this only works for signed dispatchables and requires a signed extension -/// such as `SkipCheckIfFeeless` as defined in `pallet-skip-feeless-payment` to wrap the existing -/// payment extension. Else, this is completely ignored and the dispatchable is still charged. +/// --- /// -/// ### Macro expansion +/// Rust-Analyzer Users: Documentation for this macro can be found at /// -/// The macro implements the `CheckIfFeeless` trait on the dispatchable and calls the corresponding -/// closure in the implementation. +/// `frame_support::pallet_macros::feeless_if`. #[proc_macro_attribute] pub fn feeless_if(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// Allows you to define some extra constants to be added into constant metadata. -/// -/// Item must be defined as: /// -/// ```ignore -/// #[pallet::extra_constants] -/// impl Pallet where $optional_where_clause { -/// /// $some_doc -/// $vis fn $fn_name() -> $some_return_type { -/// ... -/// } -/// ... -/// } -/// ``` -/// I.e. a regular rust `impl` block with some optional where clause and functions with 0 args, -/// 0 generics, and some return type. +/// --- /// -/// ## Macro expansion +/// Rust-Analyzer Users: Documentation for this macro can be found at /// -/// The macro add some extra constants to pallet constant metadata. +/// `frame_support::pallet_macros::extra_constants`. #[proc_macro_attribute] pub fn extra_constants(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::error]` attribute allows you to define an error enum that will be returned -/// from the dispatchable when an error occurs. The information for this error type is then -/// stored in metadata. -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::error] -/// pub enum Error { -/// /// $some_optional_doc -/// $SomeFieldLessVariant, -/// /// $some_more_optional_doc -/// $SomeVariantWithOneField(FieldType), -/// ... -/// } -/// ``` -/// I.e. a regular enum named `Error`, with generic `T` and fieldless or multiple-field -/// variants. -/// -/// Any field type in the enum variants must implement `TypeInfo` in order to be properly used -/// in the metadata, and its encoded size should be as small as possible, preferably 1 byte in -/// size in order to reduce storage size. 
The error enum itself has an absolute maximum encoded -/// size specified by `MAX_MODULE_ERROR_ENCODED_SIZE`. /// -/// (1 byte can still be 256 different errors. The more specific the error, the easier it is to -/// diagnose problems and give a better experience to the user. Don't skimp on having lots of -/// individual error conditions.) -/// -/// Field types in enum variants must also implement `PalletError`, otherwise the pallet will -/// fail to compile. Rust primitive types have already implemented the `PalletError` trait -/// along with some commonly used stdlib types such as [`Option`] and `PhantomData`, and hence -/// in most use cases, a manual implementation is not necessary and is discouraged. -/// -/// The generic `T` must not bound anything and a `where` clause is not allowed. That said, -/// bounds and/or a where clause should not needed for any use-case. -/// -/// ## Macro expansion -/// -/// The macro implements the [`Debug`] trait and functions `as_u8` using variant position, and -/// `as_str` using variant doc. +/// --- /// -/// The macro also implements `From>` for `&'static str` and `From>` for -/// `DispatchError`. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::error`. #[proc_macro_attribute] pub fn error(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::event]` attribute allows you to define pallet events. Pallet events are -/// stored under the `system` / `events` key when the block is applied (and then replaced when -/// the next block writes it's events). -/// -/// The Event enum must be defined as follows: /// -/// ```ignore -/// #[pallet::event] -/// #[pallet::generate_deposit($visibility fn deposit_event)] // Optional -/// pub enum Event<$some_generic> $optional_where_clause { -/// /// Some doc -/// $SomeName($SomeType, $YetanotherType, ...), -/// ... -/// } -/// ``` -/// -/// I.e. an enum (with named or unnamed fields variant), named `Event`, with generic: none or -/// `T` or `T: Config`, and optional w here clause. +/// --- /// -/// Each field must implement [`Clone`], [`Eq`], [`PartialEq`], `Encode`, `Decode`, and -/// [`Debug`] (on std only). For ease of use, bound by the trait `Member`, available in -/// `frame_support::pallet_prelude`. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::event`. #[proc_macro_attribute] pub fn event(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The attribute `#[pallet::generate_deposit($visibility fn deposit_event)]` generates a -/// helper function on `Pallet` that handles deposit events. -/// -/// NOTE: For instantiable pallets, the event must be generic over `T` and `I`. /// -/// ## Macro expansion -/// -/// The macro will add on enum `Event` the attributes: -/// * `#[derive(frame_support::CloneNoBound)]` -/// * `#[derive(frame_support::EqNoBound)]` -/// * `#[derive(frame_support::PartialEqNoBound)]` -/// * `#[derive(frame_support::RuntimeDebugNoBound)]` -/// * `#[derive(codec::Encode)]` -/// * `#[derive(codec::Decode)]` -/// -/// The macro implements `From>` for (). -/// -/// The macro implements a metadata function on `Event` returning the `EventMetadata`. +/// --- /// -/// If `#[pallet::generate_deposit]` is present then the macro implements `fn deposit_event` on -/// `Pallet`. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::generate_deposit`. 
#[proc_macro_attribute] pub fn generate_deposit(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -1298,105 +985,68 @@ pub fn generate_deposit(_: TokenStream, _: TokenStream) -> TokenStream { /// /// --- /// -/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// Rust-Analyzer Users: Documentation for this macro can be found at /// `frame_support::pallet_macros::storage`. #[proc_macro_attribute] pub fn storage(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The optional attribute `#[pallet::getter(fn $my_getter_fn_name)]` allows you to define a -/// getter function on `Pallet`. /// -/// Also see [`pallet::storage`](`macro@storage`) +/// --- +/// +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::getter`. #[proc_macro_attribute] pub fn getter(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The optional attribute `#[pallet::storage_prefix = "SomeName"]` allows you to define the -/// storage prefix to use. This is helpful if you wish to rename the storage field but don't -/// want to perform a migration. /// -/// E.g: -/// -/// ```ignore -/// #[pallet::storage] -/// #[pallet::storage_prefix = "foo"] -/// #[pallet::getter(fn my_storage)] -/// pub(super) type MyStorage = StorageMap; -/// ``` -/// -/// or +/// --- /// -/// ```ignore -/// #[pallet::storage] -/// #[pallet::getter(fn my_storage)] -/// pub(super) type MyStorage = StorageMap<_, Blake2_128Concat, u32, u32>; -/// ``` +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::storage_prefix`. #[proc_macro_attribute] pub fn storage_prefix(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The optional attribute `#[pallet::unbounded]` declares the storage as unbounded. When -/// implementating the storage info (when `#[pallet::generate_storage_info]` is specified on -/// the pallet struct placeholder), the size of the storage will be declared as unbounded. This -/// can be useful for storage which can never go into PoV (Proof of Validity). +/// +/// --- +/// +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::unbounded`. #[proc_macro_attribute] pub fn unbounded(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The optional attribute `#[pallet::whitelist_storage]` will declare the -/// storage as whitelisted from benchmarking. Doing so will exclude reads of -/// that value's storage key from counting towards weight calculations during -/// benchmarking. /// -/// This attribute should only be attached to storages that are known to be -/// read/used in every block. This will result in a more accurate benchmarking weight. -/// -/// ### Example -/// ```ignore -/// #[pallet::storage] -/// #[pallet::whitelist_storage] -/// pub(super) type Number = StorageValue<_, frame_system::pallet_prelude::BlockNumberFor::, ValueQuery>; -/// ``` +/// --- /// -/// NOTE: As with all `pallet::*` attributes, this one _must_ be written as -/// `#[pallet::whitelist_storage]` and can only be placed inside a `pallet` module in order for -/// it to work properly. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::whitelist_storage`. 
#[proc_macro_attribute] pub fn whitelist_storage(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::type_value]` attribute lets you define a struct implementing the `Get` trait -/// to ease the use of storage types. This attribute is meant to be used alongside -/// [`#[pallet::storage]`](`macro@storage`) to define a storage's default value. This attribute -/// can be used multiple times. -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::type_value] -/// fn $MyDefaultName<$some_generic>() -> $default_type $optional_where_clause { $expr } -/// ``` -/// -/// I.e.: a function definition with generics none or `T: Config` and a returned type. /// -/// E.g.: +/// --- /// -/// ```ignore -/// #[pallet::type_value] -/// fn MyDefault() -> T::Balance { 3.into() } -/// ``` +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::disable_try_decode_storage`. +#[proc_macro_attribute] +pub fn disable_try_decode_storage(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + /// -/// ## Macro expansion +/// --- /// -/// The macro renames the function to some internal name, generates a struct with the original -/// name of the function and its generic, and implements `Get<$ReturnType>` by calling the user -/// defined function. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::type_value`. #[proc_macro_attribute] pub fn type_value(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -1405,7 +1055,7 @@ pub fn type_value(_: TokenStream, _: TokenStream) -> TokenStream { /// /// --- /// -/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// Rust-Analyzer Users: Documentation for this macro can be found at /// `frame_support::pallet_macros::genesis_config`. #[proc_macro_attribute] pub fn genesis_config(_: TokenStream, _: TokenStream) -> TokenStream { @@ -1415,115 +1065,48 @@ pub fn genesis_config(_: TokenStream, _: TokenStream) -> TokenStream { /// /// --- /// -/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// Rust-Analyzer Users: Documentation for this macro can be found at /// `frame_support::pallet_macros::genesis_build`. #[proc_macro_attribute] pub fn genesis_build(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::inherent]` attribute allows the pallet to provide some -/// [inherent](https://docs.substrate.io/fundamentals/transaction-types/#inherent-transactions). -/// An inherent is some piece of data that is inserted by a block authoring node at block -/// creation time and can either be accepted or rejected by validators based on whether the -/// data falls within an acceptable range. -/// -/// The most common inherent is the `timestamp` that is inserted into every block. Since there -/// is no way to validate timestamps, validators simply check that the timestamp reported by -/// the block authoring node falls within an acceptable range. -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::inherent] -/// impl ProvideInherent for Pallet { -/// // ... regular trait implementation -/// } -/// ``` -/// -/// I.e. a trait implementation with bound `T: Config`, of trait `ProvideInherent` for type -/// `Pallet`, and some optional where clause. 
/// -/// ## Macro expansion +/// --- /// -/// The macro currently makes no use of this information, but it might use this information in -/// the future to give information directly to `construct_runtime`. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::inherent`. #[proc_macro_attribute] pub fn inherent(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::validate_unsigned]` attribute allows the pallet to validate some unsigned -/// transaction: -/// -/// Item must be defined as: /// -/// ```ignore -/// #[pallet::validate_unsigned] -/// impl ValidateUnsigned for Pallet { -/// // ... regular trait implementation -/// } -/// ``` -/// -/// I.e. a trait implementation with bound `T: Config`, of trait `ValidateUnsigned` for type -/// `Pallet`, and some optional where clause. -/// -/// NOTE: There is also the `sp_runtime::traits::SignedExtension` trait that can be used to add -/// some specific logic for transaction validation. -/// -/// ## Macro expansion +/// --- /// -/// The macro currently makes no use of this information, but it might use this information in -/// the future to give information directly to `construct_runtime`. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::validate_unsigned`. #[proc_macro_attribute] pub fn validate_unsigned(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::origin]` attribute allows you to define some origin for the pallet. /// -/// Item must be either a type alias, an enum, or a struct. It needs to be public. -/// -/// E.g.: -/// -/// ```ignore -/// #[pallet::origin] -/// pub struct Origin(PhantomData<(T)>); -/// ``` -/// -/// **WARNING**: modifying origin changes the outer runtime origin. This outer runtime origin -/// can be stored on-chain (e.g. in `pallet-scheduler`), thus any change must be done with care -/// as it might require some migration. +/// --- /// -/// NOTE: for instantiable pallets, the origin must be generic over `T` and `I`. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::origin`. #[proc_macro_attribute] pub fn origin(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::composite_enum]` attribute allows you to define an enum that gets composed as an -/// aggregate enum by `construct_runtime`. This is similar in principle with `#[pallet::event]` and -/// `#[pallet::error]`. -/// -/// The attribute currently only supports enum definitions, and identifiers that are named -/// `FreezeReason`, `HoldReason`, `LockId` or `SlashReason`. Arbitrary identifiers for the enum are -/// not supported. The aggregate enum generated by `construct_runtime` will have the name of -/// `RuntimeFreezeReason`, `RuntimeHoldReason`, `RuntimeLockId` and `RuntimeSlashReason` -/// respectively. /// -/// NOTE: The aggregate enum generated by `construct_runtime` generates a conversion function from -/// the pallet enum to the aggregate enum, and automatically derives the following traits: -/// -/// ```ignore -/// Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, MaxEncodedLen, TypeInfo, -/// RuntimeDebug -/// ``` +/// --- /// -/// For ease of usage, when no `#[derive]` attributes are found for the enum under -/// `#[pallet::composite_enum]`, the aforementioned traits are automatically derived for it. 
The -/// inverse is also true: if there are any `#[derive]` attributes found for the enum, then no traits -/// will automatically be derived for it (this implies that you need to provide the -/// `frame_support::traits::VariantCount` implementation). +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::composite_enum`. #[proc_macro_attribute] pub fn composite_enum(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -1579,25 +1162,11 @@ pub fn task_index(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// Can be attached to a module. Doing so will declare that module as importable into a pallet -/// via [`#[import_section]`](`macro@import_section`). -/// -/// Note that sections are imported by their module name/ident, and should be referred to by -/// their _full path_ from the perspective of the target pallet. Do not attempt to make use -/// of `use` statements to bring pallet sections into scope, as this will not work (unless -/// you do so as part of a wildcard import, in which case it will work). -/// -/// ## Naming Logistics /// -/// Also note that because of how `#[pallet_section]` works, pallet section names must be -/// globally unique _within the crate in which they are defined_. For more information on -/// why this must be the case, see macro_magic's -/// [`#[export_tokens]`](https://docs.rs/macro_magic/latest/macro_magic/attr.export_tokens.html) macro. +/// --- /// -/// Optionally, you may provide an argument to `#[pallet_section]` such as -/// `#[pallet_section(some_ident)]`, in the event that there is another pallet section in -/// same crate with the same ident/name. The ident you specify can then be used instead of -/// the module's ident name when you go to import it via `#[import_section]`. +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::pallet_macros::pallet_section`. #[proc_macro_attribute] pub fn pallet_section(attr: TokenStream, tokens: TokenStream) -> TokenStream { let tokens_clone = tokens.clone(); @@ -1611,39 +1180,11 @@ pub fn pallet_section(attr: TokenStream, tokens: TokenStream) -> TokenStream { } } -/// An attribute macro that can be attached to a module declaration. Doing so will -/// Imports the contents of the specified external pallet section that was defined -/// previously using [`#[pallet_section]`](`macro@pallet_section`). /// -/// ## Example -/// ```ignore -/// #[import_section(some_section)] -/// #[pallet] -/// pub mod pallet { -/// // ... -/// } -/// ``` -/// where `some_section` was defined elsewhere via: -/// ```ignore -/// #[pallet_section] -/// pub mod some_section { -/// // ... -/// } -/// ``` -/// -/// This will result in the contents of `some_section` being _verbatim_ imported into -/// the pallet above. Note that since the tokens for `some_section` are essentially -/// copy-pasted into the target pallet, you cannot refer to imports that don't also -/// exist in the target pallet, but this is easily resolved by including all relevant -/// `use` statements within your pallet section, so they are imported as well, or by -/// otherwise ensuring that you have the same imports on the target pallet. -/// -/// It is perfectly permissible to import multiple pallet sections into the same pallet, -/// which can be done by having multiple `#[import_section(something)]` attributes -/// attached to the pallet. 
+/// --- /// -/// Note that sections are imported by their module name/ident, and should be referred to by -/// their _full path_ from the perspective of the target pallet. +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::pallet_macros::import_section`. #[import_tokens_attr { format!( "{}::macro_magic", diff --git a/substrate/frame/support/procedural/src/pallet/expand/call.rs b/substrate/frame/support/procedural/src/pallet/expand/call.rs index f43faba1ee0c8c14cb808b2f64f58446ebe7ffae..f395872c8a80a6b9a2ddd391ea106284140d53ba 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/call.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/call.rs @@ -18,7 +18,7 @@ use crate::{ pallet::{ expand::warnings::{weight_constant_warning, weight_witness_warning}, - parse::call::{CallVariantDef, CallWeightDef}, + parse::call::CallWeightDef, Def, }, COUNTER, @@ -112,22 +112,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { } debug_assert_eq!(fn_weight.len(), methods.len()); - let map_fn_docs = if !def.dev_mode { - // Emit the [`Pallet::method`] documentation only for non-dev modes. - |method: &CallVariantDef| { - let reference = format!("See [`Pallet::{}`].", method.name); - quote!(#reference) - } - } else { - // For the dev-mode do not provide a documenation link as it will break the - // `cargo doc` if the pallet is private inside a test. - |method: &CallVariantDef| { - let reference = format!("See `Pallet::{}`.", method.name); - quote!(#reference) - } - }; - - let fn_doc = methods.iter().map(map_fn_docs).collect::>(); + let fn_doc = methods.iter().map(|method| &method.docs).collect::>(); let args_name = methods .iter() @@ -309,7 +294,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { ), #( #cfg_attrs - #[doc = #fn_doc] + #( #[doc = #fn_doc] )* #[codec(index = #call_index)] #fn_name { #( diff --git a/substrate/frame/support/procedural/src/pallet/expand/hooks.rs b/substrate/frame/support/procedural/src/pallet/expand/hooks.rs index 5044d4285bb64aceeccd14959743a494f3c94f85..3623b595268d081ee7b5750a5431f208383f8517 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/hooks.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/hooks.rs @@ -42,7 +42,7 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { >::name::().unwrap_or("") }; - let initialize_on_chain_storage_version = if let Some(current_version) = + let initialize_on_chain_storage_version = if let Some(in_code_version) = &def.pallet_struct.storage_version { quote::quote! { @@ -50,9 +50,9 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { target: #frame_support::LOG_TARGET, "🐥 New pallet {:?} detected in the runtime. Initializing the on-chain storage version to match the storage version defined in the pallet: {:?}", #pallet_name, - #current_version + #in_code_version ); - #current_version.put::(); + #in_code_version.put::(); } } else { quote::quote! { @@ -73,10 +73,10 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { #frame_support::__private::log::info!( target: #frame_support::LOG_TARGET, "⚠️ {} declares internal migrations (which *might* execute). 
\ - On-chain `{:?}` vs current storage version `{:?}`", + On-chain `{:?}` vs in-code storage version `{:?}`", #pallet_name, ::on_chain_storage_version(), - ::current_storage_version(), + ::in_code_storage_version(), ); } } else { @@ -102,23 +102,23 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { }; // If a storage version is set, we should ensure that the storage version on chain matches the - // current storage version. This assumes that `Executive` is running custom migrations before + // in-code storage version. This assumes that `Executive` is running custom migrations before // the pallets are called. let post_storage_version_check = if def.pallet_struct.storage_version.is_some() { quote::quote! { let on_chain_version = ::on_chain_storage_version(); - let current_version = ::current_storage_version(); + let in_code_version = ::in_code_storage_version(); - if on_chain_version != current_version { + if on_chain_version != in_code_version { #frame_support::__private::log::error!( target: #frame_support::LOG_TARGET, - "{}: On chain storage version {:?} doesn't match current storage version {:?}.", + "{}: On chain storage version {:?} doesn't match in-code storage version {:?}.", #pallet_name, on_chain_version, - current_version, + in_code_version, ); - return Err("On chain and current storage version do not match. Missing runtime upgrade?".into()); + return Err("On chain and in-code storage version do not match. Missing runtime upgrade?".into()); } } } else { @@ -175,6 +175,22 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { } } + impl<#type_impl_gen> + #frame_support::traits::OnPoll<#frame_system::pallet_prelude::BlockNumberFor::> + for #pallet_ident<#type_use_gen> #where_clause + { + fn on_poll( + n: #frame_system::pallet_prelude::BlockNumberFor::, + weight: &mut #frame_support::weights::WeightMeter + ) { + < + Self as #frame_support::traits::Hooks< + #frame_system::pallet_prelude::BlockNumberFor:: + > + >::on_poll(n, weight); + } + } + impl<#type_impl_gen> #frame_support::traits::OnInitialize<#frame_system::pallet_prelude::BlockNumberFor::> for #pallet_ident<#type_use_gen> #where_clause diff --git a/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs b/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs index c2102f0284dbeeadb08fc9122e5fe824032dfb82..7cdf6bde9de87d0bd4eed70de93173bc9bf4e3ff 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs @@ -160,7 +160,7 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { } ); - let (storage_version, current_storage_version_ty) = + let (storage_version, in_code_storage_version_ty) = if let Some(v) = def.pallet_struct.storage_version.as_ref() { (quote::quote! { #v }, quote::quote! 
{ #frame_support::traits::StorageVersion }) } else { @@ -203,9 +203,9 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { for #pallet_ident<#type_use_gen> #config_where_clause { - type CurrentStorageVersion = #current_storage_version_ty; + type InCodeStorageVersion = #in_code_storage_version_ty; - fn current_storage_version() -> Self::CurrentStorageVersion { + fn in_code_storage_version() -> Self::InCodeStorageVersion { #storage_version } diff --git a/substrate/frame/support/procedural/src/pallet/expand/storage.rs b/substrate/frame/support/procedural/src/pallet/expand/storage.rs index 96c2c8e3120b8f8fb76e8ee10b067ce3f13f62e9..937b068cfabd14e04b05177be2f0242eccd3abec 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/storage.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/storage.rs @@ -834,7 +834,10 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { .storages .iter() .filter_map(|storage| { - if storage.cfg_attrs.is_empty() { + // A little hacky; don't generate for cfg gated storages to not get compile errors + // when building "frame-feature-testing" gated storages in the "frame-support-test" + // crate. + if storage.try_decode && storage.cfg_attrs.is_empty() { let ident = &storage.ident; let gen = &def.type_use_generics(storage.attr_span); Some(quote::quote_spanned!(storage.attr_span => #ident<#gen> )) diff --git a/substrate/frame/support/procedural/src/pallet/parse/pallet_struct.rs b/substrate/frame/support/procedural/src/pallet/parse/pallet_struct.rs index f4af86aa3e9936a3e77036f062ba8d4bf1a6c601..c2855ae38ef05aaf4c5b9b9a3f96a461a87bb48c 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/pallet_struct.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/pallet_struct.rs @@ -44,7 +44,7 @@ pub struct PalletStructDef { /// Whether to specify the storages max encoded len when implementing `StorageInfoTrait`. /// Contains the span of the attribute. pub without_storage_info: Option, - /// The current storage version of the pallet. + /// The in-code storage version of the pallet. 
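Since the hunks above rename the `current_*` storage-version API to `in_code_*`, a hedged sketch of how downstream code reads the two versions after the rename may help. It assumes a pallet `Pallet<T>` / `Config` defined in the same crate with `#[pallet::storage_version(..)]` set; `MyMigration` is an illustrative name, not part of this diff.

```rust
use core::marker::PhantomData;
use frame_support::{
	traits::{GetStorageVersion, OnRuntimeUpgrade},
	weights::Weight,
};

pub struct MyMigration<T>(PhantomData<T>);

impl<T: crate::Config> OnRuntimeUpgrade for MyMigration<T> {
	fn on_runtime_upgrade() -> Weight {
		// The version compiled into the pallet (previously
		// `current_storage_version`) vs. the one last written to storage.
		let in_code = crate::Pallet::<T>::in_code_storage_version();
		let on_chain = crate::Pallet::<T>::on_chain_storage_version();

		if on_chain < in_code {
			// ... migrate the storage layout here, then persist the new
			// version so the post-upgrade version check above passes.
			in_code.put::<crate::Pallet<T>>();
		}
		Weight::zero()
	}
}
```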
pub storage_version: Option, } diff --git a/substrate/frame/support/procedural/src/pallet/parse/storage.rs b/substrate/frame/support/procedural/src/pallet/parse/storage.rs index d1c7ba2e5e3c6788827577dfa843205d0be69174..9d96a18b56943db4715fabd30e8bc0f1c0aa7d28 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/storage.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/storage.rs @@ -29,6 +29,7 @@ mod keyword { syn::custom_keyword!(storage_prefix); syn::custom_keyword!(unbounded); syn::custom_keyword!(whitelist_storage); + syn::custom_keyword!(disable_try_decode_storage); syn::custom_keyword!(OptionQuery); syn::custom_keyword!(ResultQuery); syn::custom_keyword!(ValueQuery); @@ -39,11 +40,13 @@ mod keyword { /// * `#[pallet::storage_prefix = "CustomName"]` /// * `#[pallet::unbounded]` /// * `#[pallet::whitelist_storage] +/// * `#[pallet::disable_try_decode_storage]` pub enum PalletStorageAttr { Getter(syn::Ident, proc_macro2::Span), StorageName(syn::LitStr, proc_macro2::Span), Unbounded(proc_macro2::Span), WhitelistStorage(proc_macro2::Span), + DisableTryDecodeStorage(proc_macro2::Span), } impl PalletStorageAttr { @@ -53,6 +56,7 @@ impl PalletStorageAttr { Self::StorageName(_, span) | Self::Unbounded(span) | Self::WhitelistStorage(span) => *span, + Self::DisableTryDecodeStorage(span) => *span, } } } @@ -93,6 +97,9 @@ impl syn::parse::Parse for PalletStorageAttr { } else if lookahead.peek(keyword::whitelist_storage) { content.parse::()?; Ok(Self::WhitelistStorage(attr_span)) + } else if lookahead.peek(keyword::disable_try_decode_storage) { + content.parse::()?; + Ok(Self::DisableTryDecodeStorage(attr_span)) } else { Err(lookahead.error()) } @@ -104,6 +111,7 @@ struct PalletStorageAttrInfo { rename_as: Option, unbounded: bool, whitelisted: bool, + try_decode: bool, } impl PalletStorageAttrInfo { @@ -112,6 +120,7 @@ impl PalletStorageAttrInfo { let mut rename_as = None; let mut unbounded = false; let mut whitelisted = false; + let mut disable_try_decode_storage = false; for attr in attrs { match attr { PalletStorageAttr::Getter(ident, ..) if getter.is_none() => getter = Some(ident), @@ -119,6 +128,8 @@ impl PalletStorageAttrInfo { rename_as = Some(name), PalletStorageAttr::Unbounded(..) if !unbounded => unbounded = true, PalletStorageAttr::WhitelistStorage(..) if !whitelisted => whitelisted = true, + PalletStorageAttr::DisableTryDecodeStorage(..) if !disable_try_decode_storage => + disable_try_decode_storage = true, attr => return Err(syn::Error::new( attr.attr_span(), @@ -127,7 +138,13 @@ impl PalletStorageAttrInfo { } } - Ok(PalletStorageAttrInfo { getter, rename_as, unbounded, whitelisted }) + Ok(PalletStorageAttrInfo { + getter, + rename_as, + unbounded, + whitelisted, + try_decode: !disable_try_decode_storage, + }) } } @@ -186,6 +203,8 @@ pub struct StorageDef { pub unbounded: bool, /// Whether or not reads to this storage key will be ignored by benchmarking pub whitelisted: bool, + /// Whether or not to try to decode the storage key when running try-runtime checks. 
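From the pallet author's side, the `disable_try_decode_storage` attribute parsed above is used like any other `#[pallet::*]` storage attribute. A minimal sketch, assuming `frame_support`/`frame_system` as dependencies; the storage names are illustrative.

```rust
#[frame_support::pallet]
pub mod pallet {
	use frame_support::pallet_prelude::*;

	#[pallet::pallet]
	pub struct Pallet<T>(_);

	#[pallet::config]
	pub trait Config: frame_system::Config {}

	/// Decoded as usual when try-runtime state checks walk the pallet storage.
	#[pallet::storage]
	pub type Checked<T> = StorageValue<_, u32, ValueQuery>;

	/// Skipped by those checks thanks to the new attribute, e.g. for values
	/// that are intentionally stored in a non-canonical encoding.
	#[pallet::storage]
	#[pallet::disable_try_decode_storage]
	pub type Unchecked<T> = StorageValue<_, u32, ValueQuery>;
}
```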
+ pub try_decode: bool, /// Whether or not a default hasher is allowed to replace `_` pub use_default_hasher: bool, } @@ -775,7 +794,7 @@ impl StorageDef { }; let attrs: Vec = helper::take_item_pallet_attrs(&mut item.attrs)?; - let PalletStorageAttrInfo { getter, rename_as, mut unbounded, whitelisted } = + let PalletStorageAttrInfo { getter, rename_as, mut unbounded, whitelisted, try_decode } = PalletStorageAttrInfo::from_attrs(attrs)?; // set all storages to be unbounded if dev_mode is enabled @@ -921,6 +940,7 @@ impl StorageDef { named_generics, unbounded, whitelisted, + try_decode, use_default_hasher, }) } diff --git a/substrate/frame/support/src/dispatch.rs b/substrate/frame/support/src/dispatch.rs index 4a313551aca634b88bed7d0f989943f7075a5050..61787a74012827f5c12b389ecedb0fd254bccae2 100644 --- a/substrate/frame/support/src/dispatch.rs +++ b/substrate/frame/support/src/dispatch.rs @@ -25,7 +25,7 @@ use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; use sp_runtime::{ generic::{CheckedExtrinsic, UncheckedExtrinsic}, - traits::SignedExtension, + traits::Dispatchable, DispatchError, RuntimeDebug, }; use sp_std::fmt; @@ -268,7 +268,8 @@ pub fn extract_actual_weight(result: &DispatchResultWithPostInfo, info: &Dispatc .calc_actual_weight(info) } -/// Extract the actual pays_fee from a dispatch result if any or fall back to the default weight. +/// Extract the actual pays_fee from a dispatch result if any or fall back to the default +/// weight. pub fn extract_actual_pays_fee(result: &DispatchResultWithPostInfo, info: &DispatchInfo) -> Pays { match result { Ok(post_info) => post_info, @@ -368,11 +369,10 @@ where } /// Implementation for unchecked extrinsic. -impl GetDispatchInfo - for UncheckedExtrinsic +impl GetDispatchInfo + for UncheckedExtrinsic where - Call: GetDispatchInfo, - Extra: SignedExtension, + Call: GetDispatchInfo + Dispatchable, { fn get_dispatch_info(&self) -> DispatchInfo { self.function.get_dispatch_info() @@ -380,7 +380,7 @@ where } /// Implementation for checked extrinsic. -impl GetDispatchInfo for CheckedExtrinsic +impl GetDispatchInfo for CheckedExtrinsic where Call: GetDispatchInfo, { @@ -389,21 +389,6 @@ where } } -/// Implementation for test extrinsic. -#[cfg(feature = "std")] -impl GetDispatchInfo - for sp_runtime::testing::TestXt -{ - fn get_dispatch_info(&self) -> DispatchInfo { - // for testing: weight == size. - DispatchInfo { - weight: Weight::from_parts(self.encode().len() as _, 0), - pays_fee: Pays::Yes, - class: self.call.get_dispatch_info().class, - } - } -} - /// A struct holding value for each `DispatchClass`. 
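The hunk above also touches the `frame_support::dispatch` helpers whose doc comments are being reflowed. A hedged, self-contained sketch of how `extract_actual_weight` and `extract_actual_pays_fee` behave; the function name and the concrete numbers are illustrative.

```rust
use frame_support::{
	dispatch::{
		extract_actual_pays_fee, extract_actual_weight, DispatchInfo,
		DispatchResultWithPostInfo, Pays, PostDispatchInfo,
	},
	weights::Weight,
};

fn weight_accounting_example() {
	// What the call was charged with pre-dispatch...
	let info = DispatchInfo { weight: Weight::from_parts(1_000, 0), ..Default::default() };
	// ...and what it reported after executing.
	let post =
		PostDispatchInfo { actual_weight: Some(Weight::from_parts(400, 0)), pays_fee: Pays::Yes };
	let result: DispatchResultWithPostInfo = Ok(post);

	// Falls back to `info.weight` if the result carries no actual weight.
	assert_eq!(extract_actual_weight(&result, &info), Weight::from_parts(400, 0));
	assert_eq!(extract_actual_pays_fee(&result, &info), Pays::Yes);
}
```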
#[derive(Clone, Eq, PartialEq, Default, Debug, Encode, Decode, TypeInfo, MaxEncodedLen)] pub struct PerDispatchClass { diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index cd12da6de54e777bd470eb65741d831a99f81ddb..b5b1ac09c609c547f26834b97553ed71fd89cd06 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -54,7 +54,8 @@ pub mod __private { #[cfg(feature = "std")] pub use sp_runtime::{bounded_btree_map, bounded_vec}; pub use sp_runtime::{ - traits::Dispatchable, DispatchError, RuntimeDebug, StateVersion, TransactionOutcome, + traits::{AsSystemOriginSigner, Dispatchable}, + DispatchError, RuntimeDebug, StateVersion, TransactionOutcome, }; #[cfg(feature = "std")] pub use sp_state_machine::BasicExternalities; @@ -75,6 +76,7 @@ pub mod storage; #[cfg(test)] mod tests; pub mod traits; +pub mod transaction_extensions; pub mod weights; #[doc(hidden)] pub mod unsigned { @@ -900,18 +902,20 @@ pub mod pallet_prelude { }; pub use codec::{Decode, Encode, MaxEncodedLen}; pub use frame_support::pallet_macros::*; + /// The optional attribute `#[inject_runtime_type]` can be attached to `RuntimeCall`, /// `RuntimeEvent`, `RuntimeOrigin` or `PalletInfo` in an impl statement that has /// `#[register_default_impl]` attached to indicate that this item is generated by /// `construct_runtime`. /// /// Attaching this attribute to such an item ensures that the combined impl generated via - /// [`#[derive_impl(..)]`](`macro@super::derive_impl`) will use the correct type - /// auto-generated by `construct_runtime!`. + /// [`#[derive_impl(..)]`](`frame_support::derive_impl`) will use the correct + /// type auto-generated by + /// `construct_runtime!`. #[doc = docify::embed!("src/tests/inject_runtime_type.rs", derive_impl_works_with_runtime_type_injection)] /// /// However, if `no_aggregated_types` is specified while using - /// `[`#[derive_impl(..)]`](`macro@super::derive_impl`)`, then these items are attached + /// `[`#[derive_impl(..)]`](`frame_support::derive_impl`)`, then these items are attached /// verbatim to the combined impl. #[doc = docify::embed!("src/tests/inject_runtime_type.rs", derive_impl_works_with_no_aggregated_types)] pub use frame_support_procedural::inject_runtime_type; @@ -931,129 +935,26 @@ pub mod pallet_prelude { pub use sp_weights::Weight; } -/// The `pallet` attribute macro defines a pallet that can be used with -/// [`construct_runtime!`]. It must be attached to a module named `pallet` as follows: +/// The pallet struct placeholder `#[pallet::pallet]` is mandatory and allows you to +/// specify pallet information. /// -/// ```ignore -/// #[pallet] -/// pub mod pallet { -/// ... -/// } +/// The struct must be defined as follows: /// ``` +/// #[frame_support::pallet] +/// mod pallet { +/// #[pallet::pallet] // <- the macro +/// pub struct Pallet(_); // <- the struct definition /// -/// Note that various types can be automatically imported using -/// [`frame_support::pallet_prelude`] and `frame_system::pallet_prelude`: -/// -/// ```ignore -/// #[pallet] -/// pub mod pallet { -/// use frame_support::pallet_prelude::*; -/// use frame_system::pallet_prelude::*; -/// ... +/// #[pallet::config] +/// pub trait Config: frame_system::Config {} /// } /// ``` /// -/// # pallet::* Attributes -/// -/// The `pallet` macro will parse any items within your `pallet` module that are annotated with -/// `#[pallet::*]` attributes. 
Some of these attributes are mandatory and some are optional, -/// and they can attach to different types of items within your pallet depending on the -/// attribute in question. The full list of `#[pallet::*]` attributes is shown below in the -/// order in which they are mentioned in this document: -/// -/// * [`pallet::pallet`](#pallet-struct-placeholder-palletpallet-mandatory) -/// * [`pallet::config`](#config-trait-palletconfig-mandatory) -/// * [`pallet::constant`](#palletconstant) -/// * [`pallet::disable_frame_system_supertrait_check`](#disable_supertrait_check) -/// * [`pallet::generate_store($vis trait Store)`](#palletgenerate_storevis-trait-store) -/// * [`pallet::storage_version`](#palletstorage_version) -/// * [`pallet::hooks`](#hooks-pallethooks-optional) -/// * [`pallet::call`](#call-palletcall-optional) -/// * [`pallet::weight($expr)`](#palletweightexpr) -/// * [`pallet::compact`](#palletcompact-some_arg-some_type) -/// * [`pallet::call_index($idx)`](#palletcall_indexidx) -/// * [`pallet::extra_constants`](#extra-constants-palletextra_constants-optional) -/// * [`pallet::error`](#error-palleterror-optional) -/// * [`pallet::event`](#event-palletevent-optional) -/// * [`pallet::generate_deposit($visibility fn -/// deposit_event)`](#palletgenerate_depositvisibility-fn-deposit_event) -/// * [`pallet::storage`](#storage-palletstorage-optional) -/// * [`pallet::getter(fn $my_getter_fn_name)`](#palletgetterfn-my_getter_fn_name-optional) -/// * [`pallet::storage_prefix = "SomeName"`](#palletstorage_prefix--somename-optional) -/// * [`pallet::unbounded`](#palletunbounded-optional) -/// * [`pallet::whitelist_storage`](#palletwhitelist_storage-optional) -/// * [`cfg(..)`](#cfg-for-storage) (on storage items) -/// * [`pallet::type_value`](#type-value-pallettype_value-optional) -/// * [`pallet::genesis_config`](#genesis-config-palletgenesis_config-optional) -/// * [`pallet::genesis_build`](#genesis-build-palletgenesis_build-optional) -/// * [`pallet::inherent`](#inherent-palletinherent-optional) -/// * [`pallet::validate_unsigned`](#validate-unsigned-palletvalidate_unsigned-optional) -/// * [`pallet::origin`](#origin-palletorigin-optional) -/// * [`pallet::composite_enum`](#composite-enum-palletcomposite_enum-optional) -/// -/// Note that at compile-time, the `#[pallet]` macro will analyze and expand all of these -/// attributes, ultimately removing their AST nodes before they can be parsed as real -/// attribute macro calls. This means that technically we do not need attribute macro -/// definitions for any of these attributes, however, for consistency and discoverability -/// reasons, we still maintain stub attribute macro definitions for all of these attributes in -/// the [`pallet_macros`] module which is automatically included in all pallets as part of the -/// pallet prelude. The actual "work" for all of these attribute macros can be found in the -/// macro expansion for `#[pallet]`. -/// -/// Also note that in this document, pallet attributes are explained using the syntax of -/// non-instantiable pallets. For an example of an instantiable pallet, see [this -/// example](#example-of-an-instantiable-pallet). -/// -/// # Dev Mode (`#[pallet(dev_mode)]`) -/// -/// Specifying the argument `dev_mode` on the `#[pallet]` or `#[frame_support::pallet]` -/// attribute attached to your pallet module will allow you to enable dev mode for a pallet. 
-/// The aim of dev mode is to loosen some of the restrictions and requirements placed on -/// production pallets for easy tinkering and development. Dev mode pallets should not be used -/// in production. Enabling dev mode has the following effects: -/// -/// * Weights no longer need to be specified on every `#[pallet::call]` declaration. By -/// default, dev mode pallets will assume a weight of zero (`0`) if a weight is not -/// specified. This is equivalent to specifying `#[weight(0)]` on all calls that do not -/// specify a weight. -/// * Call indices no longer need to be specified on every `#[pallet::call]` declaration. By -/// default, dev mode pallets will assume a call index based on the order of the call. -/// * All storages are marked as unbounded, meaning you do not need to implement -/// `MaxEncodedLen` on storage types. This is equivalent to specifying `#[pallet::unbounded]` -/// on all storage type definitions. -/// * Storage hashers no longer need to be specified and can be replaced by `_`. In dev mode, -/// these will be replaced by `Blake2_128Concat`. In case of explicit key-binding, `Hasher` -/// can simply be ignored when in `dev_mode`. -/// -/// Note that the `dev_mode` argument can only be supplied to the `#[pallet]` or -/// `#[frame_support::pallet]` attribute macro that encloses your pallet module. This argument -/// cannot be specified anywhere else, including but not limited to the `#[pallet::pallet]` -/// attribute macro. -/// -///
-/// WARNING:
-/// You should not deploy or use dev mode pallets in production. Doing so can break your chain
-/// and therefore should never be done. Once you are done tinkering, you should remove the
-/// 'dev_mode' argument from your #[pallet] declaration and fix any compile errors before
-/// attempting to use your pallet in a production scenario.
-/// 
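To make the dev-mode text being moved in this hunk concrete, here is a minimal sketch of a pallet built for tinkering, assuming `frame_support`/`frame_system` dependencies; the item names are illustrative. As the warning above stresses, `dev_mode` should be removed again before any production use.

```rust
#[frame_support::pallet(dev_mode)]
pub mod pallet {
	use frame_support::pallet_prelude::*;
	use frame_system::pallet_prelude::*;

	#[pallet::pallet]
	pub struct Pallet<T>(_);

	#[pallet::config]
	pub trait Config: frame_system::Config {}

	// Dev mode marks storage as unbounded and lets `_` stand in for the hasher
	// (it is replaced by `Blake2_128Concat`).
	#[pallet::storage]
	pub type Values<T> = StorageMap<_, _, u32, u32>;

	#[pallet::call]
	impl<T: Config> Pallet<T> {
		// No `#[pallet::weight]` or `#[pallet::call_index]` needed in dev mode.
		pub fn set(origin: OriginFor<T>, key: u32, value: u32) -> DispatchResult {
			let _ = ensure_signed(origin)?;
			Values::<T>::insert(key, value);
			Ok(())
		}
	}
}
```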
-/// -/// # Pallet struct placeholder: `#[pallet::pallet]` (mandatory) -/// -/// The pallet struct placeholder `#[pallet::pallet]` is mandatory and allows you to specify -/// pallet information. -/// -/// The struct must be defined as follows: -/// ```ignore -/// #[pallet::pallet] -/// pub struct Pallet(_); -/// ``` /// I.e. a regular struct definition named `Pallet`, with generic T and no where clause. /// /// ## Macro expansion: /// -/// The macro adds this attribute to the struct definition: +/// The macro adds this attribute to the Pallet struct definition: /// ```ignore /// #[derive( /// frame_support::CloneNoBound, @@ -1062,1225 +963,947 @@ pub mod pallet_prelude { /// frame_support::RuntimeDebugNoBound, /// )] /// ``` -/// and replaces the type `_` with `PhantomData`. It also implements on the pallet: -/// * [`GetStorageVersion`](`traits::GetStorageVersion`) -/// * [`OnGenesis`](`traits::OnGenesis`): contains some logic to write the pallet version into -/// storage. -/// * `PalletErrorTypeInfo`: provides the type information for the pallet error, if defined. +/// and replaces the type `_` with `PhantomData`. /// -/// It declares `type Module` type alias for `Pallet`, used by `construct_runtime`. +/// It also implements on the pallet: /// -/// It implements [`PalletInfoAccess`](`traits::PalletInfoAccess') on `Pallet` to ease access -/// to pallet information given by [`frame_support::traits::PalletInfo`]. (The implementation -/// uses the associated type `frame_system::Config::PalletInfo`). -/// -/// It implements [`StorageInfoTrait`](`traits::StorageInfoTrait`) on `Pallet` which give -/// information about all storages. -/// -/// If the attribute `generate_store` is set then the macro creates the trait `Store` and -/// implements it on `Pallet`. +/// * [`GetStorageVersion`](frame_support::traits::GetStorageVersion) +/// * [`OnGenesis`](frame_support::traits::OnGenesis): contains some logic to write the pallet +/// version into storage. +/// * [`PalletInfoAccess`](frame_support::traits::PalletInfoAccess) to ease access to pallet +/// information given by [`frame_support::traits::PalletInfo`]. (The implementation uses the +/// associated type [`frame_support::traits::PalletInfo`]). +/// * [`StorageInfoTrait`](frame_support::traits::StorageInfoTrait) to give information about +/// storages. /// /// If the attribute `set_storage_max_encoded_len` is set then the macro calls -/// [`StorageInfoTrait`](`traits::StorageInfoTrait`) for each storage in the implementation of -/// [`StorageInfoTrait`](`traits::StorageInfoTrait`) for the pallet. Otherwise it implements -/// [`StorageInfoTrait`](`traits::StorageInfoTrait`) for the pallet using the -/// [`PartialStorageInfoTrait`](`traits::PartialStorageInfoTrait`) implementation of storages. -/// -/// # Config trait: `#[pallet::config]` (mandatory) -/// -/// The mandatory attribute `#[pallet::config]` defines the configurable options for the -/// pallet. -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::config] -/// pub trait Config: frame_system::Config + $optionally_some_other_supertraits -/// $optional_where_clause -/// { -/// ... -/// } -/// ``` -/// -/// I.e. a regular trait definition named `Config`, with the supertrait -/// `frame_system::pallet::Config`, and optionally other supertraits and a where clause. -/// (Specifying other supertraits here is known as [tight -/// coupling](https://docs.substrate.io/reference/how-to-guides/pallet-design/use-tight-coupling/)) -/// -/// The associated type `RuntimeEvent` is reserved. 
If defined, it must have the bounds -/// `From` and `IsType<::RuntimeEvent>`. -/// -/// [`pallet::event`](`frame_support::pallet_macros::event`) must be present if `RuntimeEvent` -/// exists as a config item in your `#[pallet::config]`. -/// -/// Also see [`pallet::config`](`frame_support::pallet_macros::config`) -/// -/// ## `pallet::constant` -/// -/// The `#[pallet::constant]` attribute can be used to add an associated type trait bounded by -/// [`Get`](crate::traits::Get) from [`pallet::config`](#palletconfig) into metadata, e.g.: -/// -/// ```ignore -/// #[pallet::config] -/// pub trait Config: frame_system::Config { -/// #[pallet::constant] -/// type Foo: Get; -/// } -/// ``` -/// -/// Also see [`pallet::constant`](`frame_support::pallet_macros::constant`) -/// -/// ## `pallet::disable_frame_system_supertrait_check` -/// -/// -/// To bypass the `frame_system::Config` supertrait check, use the attribute -/// `pallet::disable_frame_system_supertrait_check`, e.g.: -/// -/// ```ignore -/// #[pallet::config] -/// #[pallet::disable_frame_system_supertrait_check] -/// pub trait Config: pallet_timestamp::Config {} -/// ``` -/// -/// NOTE: Bypassing the `frame_system::Config` supertrait check is typically desirable when you -/// want to write an alternative to the `frame_system` pallet. -/// -/// Also see -/// [`pallet::disable_frame_system_supertrait_check`](`frame_support::pallet_macros::disable_frame_system_supertrait_check`) -/// -/// ## Macro expansion: -/// -/// The macro expands pallet constant metadata with the information given by -/// `#[pallet::constant]`. -/// -/// # `pallet::generate_store($vis trait Store)` -/// -/// To generate a `Store` trait associating all storages, annotate your `Pallet` struct with -/// the attribute `#[pallet::generate_store($vis trait Store)]`, e.g.: -/// -/// ```ignore -/// #[pallet::pallet] -/// #[pallet::generate_store(pub(super) trait Store)] -/// pub struct Pallet(_); -/// ``` -/// More precisely, the `Store` trait contains an associated type for each storage. It is -/// implemented for `Pallet` allowing access to the storage from pallet struct. -/// -/// Thus when defining a storage named `Foo`, it can later be accessed from `Pallet` using -/// `::Foo`. -/// -/// NOTE: this attribute is only valid when applied _directly_ to your `Pallet` struct -/// definition. -/// -/// Also see [`pallet::generate_store`](`frame_support::pallet_macros::generate_store`). -/// -/// # `pallet::storage_version` -/// -/// Because the [`pallet::pallet`](#pallet-struct-placeholder-palletpallet-mandatory) macro -/// implements [`traits::GetStorageVersion`], the current storage version needs to be -/// communicated to the macro. This can be done by using the `pallet::storage_version` -/// attribute: -/// -/// ```ignore -/// const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); -/// -/// #[pallet::pallet] -/// #[pallet::storage_version(STORAGE_VERSION)] -/// pub struct Pallet(_); -/// ``` -/// -/// If not present, the current storage version is set to the default value. -/// -/// Also see [`pallet::storage_version`](`frame_support::pallet_macros::storage_version`) -/// -/// # Hooks: `#[pallet::hooks]` (optional) -/// -/// The `pallet::hooks` attribute allows you to specify a `Hooks` implementation for `Pallet` -/// that specifies pallet-specific logic. -/// -/// The item the attribute attaches to must be defined as follows: -/// ```ignore -/// #[pallet::hooks] -/// impl Hooks> for Pallet $optional_where_clause { -/// ... -/// } -/// ``` -/// I.e. 
a regular trait implementation with generic bound: `T: Config`, for the trait -/// `Hooks>` (they are defined in preludes), for the type `Pallet` and -/// with an optional where clause. -/// -/// If no `#[pallet::hooks]` exists, then the following default implementation is -/// automatically generated: -/// ```ignore -/// #[pallet::hooks] -/// impl Hooks> for Pallet {} -/// ``` -/// -/// Also see [`pallet::hooks`](`frame_support::pallet_macros::hooks`) -/// -/// # Call: `#[pallet::call]` (optional) -/// -/// Implementation of pallet dispatchables. -/// -/// Item must be defined as: -/// ```ignore -/// #[pallet::call] -/// impl Pallet { -/// /// $some_doc -/// #[pallet::weight($ExpressionResultingInWeight)] -/// pub fn $fn_name( -/// origin: OriginFor, -/// $some_arg: $some_type, -/// // or with compact attribute: #[pallet::compact] $some_arg: $some_type, -/// ... -/// ) -> DispatchResultWithPostInfo { // or `-> DispatchResult` -/// ... -/// } -/// ... -/// } -/// ``` -/// I.e. a regular type implementation, with generic `T: Config`, on type `Pallet`, with -/// an optional where clause. -/// -/// ## `#[pallet::weight($expr)]` -/// -/// Each dispatchable needs to define a weight with `#[pallet::weight($expr)]` attribute, the -/// first argument must be `origin: OriginFor`. -/// -/// Also see [`pallet::weight`](`frame_support::pallet_macros::weight`) -/// -/// ### `#[pallet::compact] $some_arg: $some_type` -/// -/// Compact encoding for arguments can be achieved via `#[pallet::compact]`. The function must -/// return a `DispatchResultWithPostInfo` or `DispatchResult`. -/// -/// Also see [`pallet::compact`](`frame_support::pallet_macros::compact`) -/// -/// ## `#[pallet::call_index($idx)]` -/// -/// Each dispatchable may also be annotated with the `#[pallet::call_index($idx)]` attribute, -/// which explicitly defines the codec index for the dispatchable function in the `Call` enum. -/// -/// All call indexes start from 0, until it encounters a dispatchable function with a defined -/// call index. The dispatchable function that lexically follows the function with a defined -/// call index will have that call index, but incremented by 1, e.g. if there are 3 -/// dispatchable functions `fn foo`, `fn bar` and `fn qux` in that order, and only `fn bar` -/// has a call index of 10, then `fn qux` will have an index of 11, instead of 1. -/// -/// **WARNING**: modifying dispatchables, changing their order, removing some, etc., must be -/// done with care. Indeed this will change the outer runtime call type (which is an enum with -/// one variant per pallet), this outer runtime call can be stored on-chain (e.g. in -/// `pallet-scheduler`). Thus migration might be needed. To mitigate against some of this, the -/// `#[pallet::call_index($idx)]` attribute can be used to fix the order of the dispatchable so -/// that the `Call` enum encoding does not change after modification. As a general rule of -/// thumb, it is therefore adventageous to always add new calls to the end so you can maintain -/// the existing order of calls. -/// -/// Also see [`pallet::call_index`](`frame_support::pallet_macros::call_index`) -/// -/// # Extra constants: `#[pallet::extra_constants]` (optional) -/// -/// Allows you to define some extra constants to be added into constant metadata. -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::extra_constants] -/// impl Pallet where $optional_where_clause { -/// /// $some_doc -/// $vis fn $fn_name() -> $some_return_type { -/// ... -/// } -/// ... 
-/// } -/// ``` -/// I.e. a regular rust `impl` block with some optional where clause and functions with 0 args, -/// 0 generics, and some return type. -/// -/// ## Macro expansion -/// -/// The macro add some extra constants to pallet constant metadata. -/// -/// Also see: [`pallet::extra_constants`](`frame_support::pallet_macros::extra_constants`) -/// -/// # Error: `#[pallet::error]` (optional) -/// -/// The `#[pallet::error]` attribute allows you to define an error enum that will be returned -/// from the dispatchable when an error occurs. The information for this error type is then -/// stored in metadata. -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::error] -/// pub enum Error { -/// /// $some_optional_doc -/// $SomeFieldLessVariant, -/// /// $some_more_optional_doc -/// $SomeVariantWithOneField(FieldType), -/// ... -/// } -/// ``` -/// I.e. a regular enum named `Error`, with generic `T` and fieldless or multiple-field -/// variants. -/// -/// Any field type in the enum variants must implement [`scale_info::TypeInfo`] in order to be -/// properly used in the metadata, and its encoded size should be as small as possible, -/// preferably 1 byte in size in order to reduce storage size. The error enum itself has an -/// absolute maximum encoded size specified by [`MAX_MODULE_ERROR_ENCODED_SIZE`]. -/// -/// (1 byte can still be 256 different errors. The more specific the error, the easier it is to -/// diagnose problems and give a better experience to the user. Don't skimp on having lots of -/// individual error conditions.) -/// -/// Field types in enum variants must also implement [`PalletError`](traits::PalletError), -/// otherwise the pallet will fail to compile. Rust primitive types have already implemented -/// the [`PalletError`](traits::PalletError) trait along with some commonly used stdlib types -/// such as [`Option`] and -/// [`PhantomData`](`frame_support::__private::sp_std::marker::PhantomData`), and hence in most -/// use cases, a manual implementation is not necessary and is discouraged. -/// -/// The generic `T` must not bound anything and a `where` clause is not allowed. That said, -/// bounds and/or a where clause should not needed for any use-case. -/// -/// Also see: [`pallet::error`](`frame_support::pallet_macros::error`) -/// -/// # Event: `#[pallet::event]` (optional) -/// -/// Allows you to define pallet events. Pallet events are stored under the `system` / `events` -/// key when the block is applied (and then replaced when the next block writes it's events). -/// -/// The Event enum must be defined as follows: -/// -/// ```ignore -/// #[pallet::event] -/// #[pallet::generate_deposit($visibility fn deposit_event)] // Optional -/// pub enum Event<$some_generic> $optional_where_clause { -/// /// Some doc -/// $SomeName($SomeType, $YetanotherType, ...), -/// ... -/// } -/// ``` -/// -/// I.e. an enum (with named or unnamed fields variant), named `Event`, with generic: none or -/// `T` or `T: Config`, and optional w here clause. -/// -/// Each field must implement [`Clone`], [`Eq`], [`PartialEq`], [`Encode`], [`Decode`], and -/// [`Debug`] (on std only). For ease of use, bound by the trait -/// [`Member`](`frame_support::pallet_prelude::Member`), available in -/// frame_support::pallet_prelude. 
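The event requirements described in the removed text above translate into the following minimal sketch, assuming `frame_support`/`frame_system` dependencies; `SomethingStored` and `store` are illustrative names, and `dev_mode` is only used here so the call can omit a weight.

```rust
#[frame_support::pallet(dev_mode)]
pub mod pallet {
	use frame_support::pallet_prelude::*;
	use frame_system::pallet_prelude::*;

	#[pallet::pallet]
	pub struct Pallet<T>(_);

	#[pallet::config]
	pub trait Config: frame_system::Config {
		// The reserved associated type wiring pallet events into the runtime.
		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
	}

	#[pallet::event]
	// Generates `Pallet::<T>::deposit_event(..)`.
	#[pallet::generate_deposit(pub(super) fn deposit_event)]
	pub enum Event<T: Config> {
		/// Doc comments end up in the event metadata; every field must satisfy
		/// the `Clone`/`Eq`/`PartialEq`/`Encode`/`Decode`/`Debug` bounds noted above.
		SomethingStored { value: u32, who: T::AccountId },
	}

	#[pallet::call]
	impl<T: Config> Pallet<T> {
		pub fn store(origin: OriginFor<T>, value: u32) -> DispatchResult {
			let who = ensure_signed(origin)?;
			Self::deposit_event(Event::SomethingStored { value, who });
			Ok(())
		}
	}
}
```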
-/// -/// Also see [`pallet::event`](`frame_support::pallet_macros::event`) -/// -/// ## `#[pallet::generate_deposit($visibility fn deposit_event)]` -/// -/// The attribute `#[pallet::generate_deposit($visibility fn deposit_event)]` generates a -/// helper function on `Pallet` that handles deposit events. -/// -/// NOTE: For instantiable pallets, the event must be generic over `T` and `I`. -/// -/// Also see [`pallet::generate_deposit`](`frame_support::pallet_macros::generate_deposit`) -/// -/// # Storage: `#[pallet::storage]` (optional) -/// -/// The `#[pallet::storage]` attribute lets you define some abstract storage inside of runtime -/// storage and also set its metadata. This attribute can be used multiple times. -/// -/// Item should be defined as: -/// -/// ```ignore -/// #[pallet::storage] -/// #[pallet::getter(fn $getter_name)] // optional -/// $vis type $StorageName<$some_generic> $optional_where_clause -/// = $StorageType<$generic_name = $some_generics, $other_name = $some_other, ...>; -/// ``` -/// -/// or with unnamed generic: -/// -/// ```ignore -/// #[pallet::storage] -/// #[pallet::getter(fn $getter_name)] // optional -/// $vis type $StorageName<$some_generic> $optional_where_clause -/// = $StorageType<_, $some_generics, ...>; -/// ``` -/// -/// I.e. it must be a type alias, with generics: `T` or `T: Config`. The aliased type must be -/// one of [`StorageValue`](`pallet_prelude::StorageValue`), -/// [`StorageMap`](`pallet_prelude::StorageMap`) or -/// [`StorageDoubleMap`](`pallet_prelude::StorageDoubleMap`). The generic arguments of the -/// storage type can be given in two manners: named and unnamed. For named generic arguments, -/// the name for each argument should match the name defined for it on the storage struct: -/// * [`StorageValue`](`pallet_prelude::StorageValue`) expects `Value` and optionally -/// `QueryKind` and `OnEmpty`, -/// * [`StorageMap`](`pallet_prelude::StorageMap`) expects `Hasher`, `Key`, `Value` and -/// optionally `QueryKind` and `OnEmpty`, -/// * [`CountedStorageMap`](`pallet_prelude::CountedStorageMap`) expects `Hasher`, `Key`, -/// `Value` and optionally `QueryKind` and `OnEmpty`, -/// * [`StorageDoubleMap`](`pallet_prelude::StorageDoubleMap`) expects `Hasher1`, `Key1`, -/// `Hasher2`, `Key2`, `Value` and optionally `QueryKind` and `OnEmpty`. -/// -/// For unnamed generic arguments: Their first generic must be `_` as it is replaced by the -/// macro and other generic must declared as a normal generic type declaration. -/// -/// The `Prefix` generic written by the macro is generated using -/// `PalletInfo::name::>()` and the name of the storage type. E.g. if runtime names -/// the pallet "MyExample" then the storage `type Foo = ...` should use the prefix: -/// `Twox128(b"MyExample") ++ Twox128(b"Foo")`. -/// -/// For the [`CountedStorageMap`](`pallet_prelude::CountedStorageMap`) variant, the `Prefix` -/// also implements -/// [`CountedStorageMapInstance`](`frame_support::storage::types::CountedStorageMapInstance`). -/// It also associates a [`CounterPrefix`](`pallet_prelude::CounterPrefix'), which is -/// implemented the same as above, but the storage prefix is prepend with `"CounterFor"`. E.g. -/// if runtime names the pallet "MyExample" then the storage `type Foo = -/// CountedStorageaMap<...>` will store its counter at the prefix: `Twox128(b"MyExample") ++ -/// Twox128(b"CounterForFoo")`. 
-/// -/// E.g: -/// -/// ```ignore -/// #[pallet::storage] -/// pub(super) type MyStorage = StorageMap; -/// ``` -/// -/// In this case the final prefix used by the map is `Twox128(b"MyExample") ++ -/// Twox128(b"OtherName")`. -/// -/// Also see [`pallet::storage`](`frame_support::pallet_macros::storage`) -/// -/// ## `#[pallet::getter(fn $my_getter_fn_name)]` (optional) -/// -/// The optional attribute `#[pallet::getter(fn $my_getter_fn_name)]` allows you to define a -/// getter function on `Pallet`. -/// -/// Also see [`pallet::getter`](`frame_support::pallet_macros::getter`) -/// -/// ## `#[pallet::storage_prefix = "SomeName"]` (optional) -/// -/// The optional attribute `#[pallet::storage_prefix = "SomeName"]` allows you to define the -/// storage prefix to use, see how `Prefix` generic is implemented above. This is helpful if -/// you wish to rename the storage field but don't want to perform a migration. -/// -/// E.g: -/// -/// ```ignore -/// #[pallet::storage] -/// #[pallet::storage_prefix = "foo"] -/// #[pallet::getter(fn my_storage)] -/// pub(super) type MyStorage = StorageMap; -/// ``` -/// -/// or -/// -/// ```ignore -/// #[pallet::storage] -/// #[pallet::getter(fn my_storage)] -/// pub(super) type MyStorage = StorageMap<_, Blake2_128Concat, u32, u32>; -/// ``` -/// -/// Also see [`pallet::storage_prefix`](`frame_support::pallet_macros::storage_prefix`) -/// -/// ## `#[pallet::unbounded]` (optional) -/// -/// The optional attribute `#[pallet::unbounded]` declares the storage as unbounded. When -/// implementating the storage info (when `#[pallet::generate_storage_info]` is specified on -/// the pallet struct placeholder), the size of the storage will be declared as unbounded. This -/// can be useful for storage which can never go into PoV (Proof of Validity). -/// -/// Also see [`pallet::unbounded`](`frame_support::pallet_macros::unbounded`) -/// -/// ## `#[pallet::whitelist_storage]` (optional) -/// -/// The optional attribute `#[pallet::whitelist_storage]` will declare the storage as -/// whitelisted from benchmarking. -/// -/// See -/// [`pallet::whitelist_storage`](frame_support::pallet_macros::whitelist_storage) -/// for more info. -/// -/// ## `#[cfg(..)]` (for storage) -/// The optional attributes `#[cfg(..)]` allow conditional compilation for the storage. -/// -/// E.g: -/// -/// ```ignore -/// #[cfg(feature = "my-feature")] -/// #[pallet::storage] -/// pub(super) type MyStorage = StorageValue; -/// ``` -/// -/// All the `cfg` attributes are automatically copied to the items generated for the storage, -/// i.e. the getter, storage prefix, and the metadata element etc. -/// -/// Any type placed as the `QueryKind` parameter must implement -/// [`frame_support::storage::types::QueryKindTrait`]. There are 3 implementations of this -/// trait by default: -/// -/// 1. [`OptionQuery`](`frame_support::storage::types::OptionQuery`), the default `QueryKind` -/// used when this type parameter is omitted. Specifying this as the `QueryKind` would cause -/// storage map APIs that return a `QueryKind` to instead return an [`Option`], returning -/// `Some` when a value does exist under a specified storage key, and `None` otherwise. -/// 2. [`ValueQuery`](`frame_support::storage::types::ValueQuery`) causes storage map APIs that -/// return a `QueryKind` to instead return the value type. In cases where a value does not -/// exist under a specified storage key, the `OnEmpty` type parameter on `QueryKindTrait` is -/// used to return an appropriate value. -/// 3. 
[`ResultQuery`](`frame_support::storage::types::ResultQuery`) causes storage map APIs -/// that return a `QueryKind` to instead return a `Result`, with `T` being the value -/// type and `E` being the pallet error type specified by the `#[pallet::error]` attribute. -/// In cases where a value does not exist under a specified storage key, an `Err` with the -/// specified pallet error variant is returned. -/// -/// NOTE: If the `QueryKind` generic parameter is still generic at this stage or is using some -/// type alias then the generation of the getter might fail. In this case the getter can be -/// implemented manually. -/// -/// NOTE: The generic `Hasher` must implement the [`StorageHasher`] trait (or the type is not -/// usable at all). We use [`StorageHasher::METADATA`] for the metadata of the hasher of the -/// storage item. Thus generic hasher is supported. -/// -/// ## Macro expansion -/// -/// For each storage item the macro generates a struct named -/// `_GeneratedPrefixForStorage$NameOfStorage`, and implements -/// [`StorageInstance`](traits::StorageInstance) on it using the pallet and storage name. It -/// then uses it as the first generic of the aliased type. For -/// [`CountedStorageMap`](`pallet_prelude::CountedStorageMap`), -/// [`CountedStorageMapInstance`](`frame_support::storage::types::CountedStorageMapInstance`) -/// is implemented, and another similar struct is generated. -/// -/// For a named generic, the macro will reorder the generics, and remove the names. -/// -/// The macro implements the function `storage_metadata` on the `Pallet` implementing the -/// metadata for all storage items based on their kind: -/// * for a storage value, the type of the value is copied into the metadata -/// * for a storage map, the type of the values and the key's type is copied into the metadata -/// * for a storage double map, the type of the values, and the types of `key1` and `key2` are -/// copied into the metadata. -/// -/// # Type value: `#[pallet::type_value]` (optional) -/// -/// The `#[pallet::type_value]` attribute lets you define a struct implementing the -/// [`Get`](crate::traits::Get) trait to ease use of storage types. This attribute is meant to -/// be used alongside [`#[pallet::storage]`](#storage-palletstorage-optional) to define a -/// storage's default value. This attribute can be used multiple times. -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::type_value] -/// fn $MyDefaultName<$some_generic>() -> $default_type $optional_where_clause { $expr } -/// ``` -/// -/// I.e.: a function definition with generics none or `T: Config` and a returned type. -/// -/// E.g.: -/// -/// ```ignore -/// #[pallet::type_value] -/// fn MyDefault() -> T::Balance { 3.into() } -/// ``` -/// -/// Also see [`pallet::type_value`](`frame_support::pallet_macros::type_value`) -/// -/// # Genesis config: `#[pallet::genesis_config]` (optional) -/// -/// The `#[pallet::genesis_config]` attribute allows you to define the genesis configuration -/// for the pallet. -/// -/// Item is defined as either an enum or a struct. It needs to be public and implement the -/// trait [`BuildGenesisConfig`](`traits::BuildGenesisConfig`) with -/// [`#[pallet::genesis_build]`](#genesis-build-palletgenesis_build-optional). The type -/// generics are constrained to be either none, or `T` or `T: Config`. 
-/// -/// E.g: -/// -/// ```ignore -/// #[pallet::genesis_config] -/// pub struct GenesisConfig { -/// _myfield: BalanceOf, -/// } -/// ``` -/// -/// Also see [`pallet::genesis_config`](`frame_support::pallet_macros::genesis_config`) -/// -/// # Genesis build: `#[pallet::genesis_build]` (optional) -/// -/// The `#[pallet::genesis_build]` attribute allows you to define how `genesis_configuration` -/// is built. This takes as input the `GenesisConfig` type (as `self`) and constructs the -/// pallet's initial state. -/// -/// The impl must be defined as: -/// -/// ```ignore -/// #[pallet::genesis_build] -/// impl GenesisBuild for GenesisConfig<$maybe_generics> { -/// fn build(&self) { $expr } -/// } -/// ``` -/// -/// I.e. a trait implementation with generic `T: Config`, of trait `GenesisBuild` on -/// type `GenesisConfig` with generics none or `T`. -/// -/// E.g.: -/// -/// ```ignore -/// #[pallet::genesis_build] -/// impl GenesisBuild for GenesisConfig { -/// fn build(&self) {} -/// } -/// ``` -/// -/// Also see [`pallet::genesis_build`](`frame_support::pallet_macros::genesis_build`) -/// -/// # Inherent: `#[pallet::inherent]` (optional) -/// -/// The `#[pallet::inherent]` attribute allows the pallet to provide some -/// [inherent](https://docs.substrate.io/fundamentals/transaction-types/#inherent-transactions). -/// An inherent is some piece of data that is inserted by a block authoring node at block -/// creation time and can either be accepted or rejected by validators based on whether the -/// data falls within an acceptable range. -/// -/// The most common inherent is the `timestamp` that is inserted into every block. Since there -/// is no way to validate timestamps, validators simply check that the timestamp reported by -/// the block authoring node falls within an acceptable range. -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::inherent] -/// impl ProvideInherent for Pallet { -/// // ... regular trait implementation -/// } -/// ``` +/// [`StorageInfoTrait`](frame_support::traits::StorageInfoTrait) for each storage in the +/// implementation of [`StorageInfoTrait`](frame_support::traits::StorageInfoTrait) for the +/// pallet. Otherwise it implements +/// [`StorageInfoTrait`](frame_support::traits::StorageInfoTrait) for the pallet using the +/// [`PartialStorageInfoTrait`](frame_support::traits::PartialStorageInfoTrait) +/// implementation of storages. /// -/// I.e. a trait implementation with bound `T: Config`, of trait -/// [`ProvideInherent`](`pallet_prelude::ProvideInherent`) for type `Pallet`, and some -/// optional where clause. +/// ## Dev Mode (`#[pallet(dev_mode)]`) /// -/// Also see [`pallet::inherent`](`frame_support::pallet_macros::inherent`) +/// Specifying the argument `dev_mode` will allow you to enable dev mode for a pallet. The +/// aim of dev mode is to loosen some of the restrictions and requirements placed on +/// production pallets for easy tinkering and development. Dev mode pallets should not be +/// used in production. Enabling dev mode has the following effects: /// -/// # Validate unsigned: `#[pallet::validate_unsigned]` (optional) -/// -/// The `#[pallet::validate_unsigned]` attribute allows the pallet to validate some unsigned -/// transaction: -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::validate_unsigned] -/// impl ValidateUnsigned for Pallet { -/// // ... regular trait implementation -/// } -/// ``` -/// -/// I.e. 
a trait implementation with bound `T: Config`, of trait -/// [`ValidateUnsigned`](`pallet_prelude::ValidateUnsigned`) for type `Pallet`, and some -/// optional where clause. -/// -/// NOTE: There is also the [`sp_runtime::traits::SignedExtension`] trait that can be used to -/// add some specific logic for transaction validation. -/// -/// Also see [`pallet::validate_unsigned`](`frame_support::pallet_macros::validate_unsigned`) -/// -/// # Origin: `#[pallet::origin]` (optional) -/// -/// The `#[pallet::origin]` attribute allows you to define some origin for the pallet. -/// -/// Item must be either a type alias, an enum, or a struct. It needs to be public. -/// -/// E.g.: -/// -/// ```ignore -/// #[pallet::origin] -/// pub struct Origin(PhantomData<(T)>); -/// ``` -/// -/// **WARNING**: modifying origin changes the outer runtime origin. This outer runtime origin -/// can be stored on-chain (e.g. in `pallet-scheduler`), thus any change must be done with care -/// as it might require some migration. -/// -/// NOTE: for instantiable pallets, the origin must be generic over `T` and `I`. -/// -/// Also see [`pallet::origin`](`frame_support::pallet_macros::origin`) -/// -/// # Composite enum `#[pallet::composite_enum]` (optional) -/// -/// The `#[pallet::composite_enum]` attribute allows you to define an enum on the pallet which -/// will then instruct `construct_runtime` to amalgamate all similarly-named enums from other -/// pallets into an aggregate enum. This is similar in principle with how the aggregate enum is -/// generated for `#[pallet::event]` or `#[pallet::error]`. -/// -/// The item tagged with `#[pallet::composite_enum]` MUST be an enum declaration, and can ONLY -/// be the following identifiers: `FreezeReason`, `HoldReason`, `LockId` or `SlashReason`. -/// Custom identifiers are not supported. -/// -/// NOTE: For ease of usage, when no `#[derive]` attributes are detected, the -/// `#[pallet::composite_enum]` attribute will automatically derive the following traits for -/// the enum: -/// -/// ```ignore -/// Copy, Clone, Eq, PartialEq, Encode, Decode, MaxEncodedLen, TypeInfo, RuntimeDebug -/// ``` -/// -/// The inverse is also true: if there are any #[derive] attributes present for the enum, then -/// the attribute will not automatically derive any of the traits described above. -/// -/// # General notes on instantiable pallets -/// -/// An instantiable pallet is one where Config is generic, i.e. `Config`. This allows -/// runtime to implement multiple instances of the pallet, by using different types for the -/// generic. This is the sole purpose of the generic `I`, but because -/// [`PalletInfo`](`traits::PalletInfo`) requires the `Pallet` placeholder to be static, it is -/// important to bound by `'static` whenever [`PalletInfo`](`traits::PalletInfo`) can be used. -/// Additionally, in order to make an instantiable pallet usable as a regular pallet without an -/// instance, it is important to bound by `= ()` on every type. -/// -/// Thus impl bound looks like `impl, I: 'static>`, and types look like -/// `SomeType` or `SomeType, I: 'static = ()>`. -/// -/// # Example of a non-instantiable pallet -/// -/// ``` -/// pub use pallet::*; // reexport in crate namespace for `construct_runtime!` -/// -/// #[frame_support::pallet] -/// // NOTE: The name of the pallet is provided by `construct_runtime` and is used as -/// // the unique identifier for the pallet's storage. It is not defined in the pallet itself. 
-/// pub mod pallet { -/// use frame_support::pallet_prelude::*; // Import various types used in the pallet definition -/// use frame_system::pallet_prelude::*; // Import some system helper types. -/// -/// type BalanceOf = ::Balance; -/// -/// // Define the generic parameter of the pallet -/// // The macro parses `#[pallet::constant]` attributes and uses them to generate metadata -/// // for the pallet's constants. -/// #[pallet::config] -/// pub trait Config: frame_system::Config { -/// #[pallet::constant] // put the constant in metadata -/// type MyGetParam: Get; -/// type Balance: Parameter + MaxEncodedLen + From; -/// type RuntimeEvent: From> + IsType<::RuntimeEvent>; -/// } -/// -/// // Define some additional constant to put into the constant metadata. -/// #[pallet::extra_constants] -/// impl Pallet { -/// /// Some description -/// fn exra_constant_name() -> u128 { 4u128 } -/// } -/// -/// // Define the pallet struct placeholder, various pallet function are implemented on it. -/// #[pallet::pallet] -/// #[pallet::generate_store(pub(super) trait Store)] -/// pub struct Pallet(_); -/// -/// // Implement the pallet hooks. -/// #[pallet::hooks] -/// impl Hooks> for Pallet { -/// fn on_initialize(_n: BlockNumberFor) -> Weight { -/// unimplemented!(); -/// } -/// -/// // can implement also: on_finalize, on_runtime_upgrade, offchain_worker, ... -/// // see `Hooks` trait -/// } -/// -/// // Declare Call struct and implement dispatchables. -/// // -/// // WARNING: Each parameter used in functions must implement: Clone, Debug, Eq, PartialEq, -/// // Codec. -/// // -/// // The macro parses `#[pallet::compact]` attributes on function arguments and implements -/// // the `Call` encoding/decoding accordingly. -/// #[pallet::call] -/// impl Pallet { -/// /// Doc comment put in metadata -/// #[pallet::weight(0)] // Defines weight for call (function parameters are in scope) -/// pub fn toto( -/// origin: OriginFor, -/// #[pallet::compact] _foo: u32, -/// ) -> DispatchResultWithPostInfo { -/// let _ = origin; -/// unimplemented!(); -/// } -/// } -/// -/// // Declare the pallet `Error` enum (this is optional). -/// // The macro generates error metadata using the doc comment on each variant. -/// #[pallet::error] -/// pub enum Error { -/// /// doc comment put into metadata -/// InsufficientProposersBalance, -/// } -/// -/// // Declare pallet Event enum (this is optional). -/// // -/// // WARNING: Each type used in variants must implement: Clone, Debug, Eq, PartialEq, Codec. -/// // -/// // The macro generates event metadata, and derive Clone, Debug, Eq, PartialEq and Codec -/// #[pallet::event] -/// // Generate a funciton on Pallet to deposit an event. -/// #[pallet::generate_deposit(pub(super) fn deposit_event)] -/// pub enum Event { -/// /// doc comment put in metadata -/// // `::AccountId` is not defined in metadata list, the last -/// // Thus the metadata is `::AccountId`. -/// Proposed(::AccountId), -/// /// doc -/// // here metadata will be `Balance` as define in metadata list -/// Spending(BalanceOf), -/// // here metadata will be `Other` as define in metadata list -/// Something(u32), -/// } -/// -/// // Define a struct which implements `frame_support::traits::Get` (optional). -/// #[pallet::type_value] -/// pub(super) fn MyDefault() -> T::Balance { 3.into() } -/// -/// // Declare a storage item. Any amount of storage items can be declared (optional). -/// // -/// // Is expected either `StorageValue`, `StorageMap` or `StorageDoubleMap`. 
-/// // The macro generates the prefix type and replaces the first generic `_`. -/// // -/// // The macro expands the metadata for the storage item with the type used: -/// // * for a storage value the type of the value is copied into the metadata -/// // * for a storage map the type of the values and the type of the key is copied into the metadata -/// // * for a storage double map the types of the values and keys are copied into the -/// // metadata. -/// // -/// // NOTE: The generic `Hasher` must implement the `StorageHasher` trait (or the type is not -/// // usable at all). We use [`StorageHasher::METADATA`] for the metadata of the hasher of the -/// // storage item. Thus generic hasher is supported. -/// #[pallet::storage] -/// pub(super) type MyStorageValue = -/// StorageValue>; -/// -/// // Another storage declaration -/// #[pallet::storage] -/// #[pallet::getter(fn my_storage)] -/// #[pallet::storage_prefix = "SomeOtherName"] -/// pub(super) type MyStorage = -/// StorageMap; -/// -/// // Declare the genesis config (optional). -/// // -/// // The macro accepts either a struct or an enum; it checks that generics are consistent. -/// // -/// // Type must implement the `Default` trait. -/// #[pallet::genesis_config] -/// #[derive(frame_support::DefaultNoBound)] -/// pub struct GenesisConfig { -/// _config: sp_std::marker::PhantomData, -/// _myfield: u32, -/// } -/// -/// // Declare genesis builder. (This is need only if GenesisConfig is declared) -/// #[pallet::genesis_build] -/// impl BuildGenesisConfig for GenesisConfig { -/// fn build(&self) {} -/// } -/// -/// // Declare a pallet origin (this is optional). -/// // -/// // The macro accept type alias or struct or enum, it checks generics are consistent. -/// #[pallet::origin] -/// pub struct Origin(PhantomData); -/// -/// // Declare a hold reason (this is optional). -/// // -/// // Creates a hold reason for this pallet that is aggregated by `construct_runtime`. -/// // A similar enum can be defined for `FreezeReason`, `LockId` or `SlashReason`. -/// #[pallet::composite_enum] -/// pub enum HoldReason { -/// SomeHoldReason -/// } -/// -/// // Declare validate_unsigned implementation (this is optional). -/// #[pallet::validate_unsigned] -/// impl ValidateUnsigned for Pallet { -/// type Call = Call; -/// fn validate_unsigned( -/// source: TransactionSource, -/// call: &Self::Call -/// ) -> TransactionValidity { -/// Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) -/// } -/// } -/// -/// // Declare inherent provider for pallet (this is optional). 
-/// #[pallet::inherent] -/// impl ProvideInherent for Pallet { -/// type Call = Call; -/// type Error = InherentError; -/// -/// const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; -/// -/// fn create_inherent(_data: &InherentData) -> Option { -/// unimplemented!(); -/// } -/// -/// fn is_inherent(_call: &Self::Call) -> bool { -/// unimplemented!(); -/// } -/// } -/// -/// // Regular rust code needed for implementing ProvideInherent trait -/// -/// #[derive(codec::Encode, sp_runtime::RuntimeDebug)] -/// #[cfg_attr(feature = "std", derive(codec::Decode))] -/// pub enum InherentError { -/// } -/// -/// impl sp_inherents::IsFatalError for InherentError { -/// fn is_fatal_error(&self) -> bool { -/// unimplemented!(); -/// } -/// } -/// -/// pub const INHERENT_IDENTIFIER: sp_inherents::InherentIdentifier = *b"testpall"; -/// } -/// ``` -/// -/// # Example of an instantiable pallet -/// -/// ``` -/// pub use pallet::*; -/// -/// #[frame_support::pallet] -/// pub mod pallet { -/// use frame_support::pallet_prelude::*; -/// use frame_system::pallet_prelude::*; -/// -/// type BalanceOf = >::Balance; +/// * Weights no longer need to be specified on every `#[pallet::call]` declaration. By +/// default, dev mode pallets will assume a weight of zero (`0`) if a weight is not +/// specified. This is equivalent to specifying `#[weight(0)]` on all calls that do not +/// specify a weight. +/// * Call indices no longer need to be specified on every `#[pallet::call]` declaration. By +/// default, dev mode pallets will assume a call index based on the order of the call. +/// * All storages are marked as unbounded, meaning you do not need to implement +/// [`MaxEncodedLen`](frame_support::pallet_prelude::MaxEncodedLen) on storage types. This is +/// equivalent to specifying `#[pallet::unbounded]` on all storage type definitions. +/// * Storage hashers no longer need to be specified and can be replaced by `_`. In dev mode, +/// these will be replaced by `Blake2_128Concat`. In case of explicit key-binding, `Hasher` +/// can simply be ignored when in `dev_mode`. 
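Separate from the dev-mode bullets re-added just above, the long example being removed in this hunk also covered instantiable pallets. A minimal sketch of that shape, assuming `frame_support`/`frame_system` dependencies; the storage name is illustrative.

```rust
#[frame_support::pallet]
pub mod pallet {
	use core::marker::PhantomData;
	use frame_support::pallet_prelude::*;

	// The pallet struct carries both `T` and the instance `I`, with `()` as the
	// default so the pallet can also be used without naming an instance.
	#[pallet::pallet]
	pub struct Pallet<T, I = ()>(PhantomData<(T, I)>);

	#[pallet::config]
	pub trait Config<I: 'static = ()>: frame_system::Config {}

	// Storage (and events, errors, ...) are generic over `T` and `I`, so each
	// instance gets its own storage prefix.
	#[pallet::storage]
	pub type Value<T: Config<I>, I: 'static = ()> = StorageValue<_, u32, ValueQuery>;
}
```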
/// -/// #[pallet::config] -/// pub trait Config: frame_system::Config { -/// #[pallet::constant] -/// type MyGetParam: Get; -/// type Balance: Parameter + MaxEncodedLen + From; -/// type RuntimeEvent: From> + IsType<::RuntimeEvent>; -/// } -/// -/// #[pallet::extra_constants] -/// impl, I: 'static> Pallet { -/// /// Some description -/// fn extra_constant_name() -> u128 { 4u128 } -/// } -/// -/// #[pallet::pallet] -/// #[pallet::generate_store(pub(super) trait Store)] -/// pub struct Pallet(PhantomData<(T, I)>); -/// -/// #[pallet::hooks] -/// impl, I: 'static> Hooks> for Pallet { -/// } -/// -/// #[pallet::call] -/// impl, I: 'static> Pallet { -/// /// Doc comment put in metadata -/// #[pallet::weight(0)] -/// pub fn toto(origin: OriginFor, #[pallet::compact] _foo: u32) -> DispatchResultWithPostInfo { -/// let _ = origin; -/// unimplemented!(); -/// } -/// } -/// -/// #[pallet::error] -/// pub enum Error { -/// /// doc comment put into metadata -/// InsufficientProposersBalance, -/// } -/// -/// #[pallet::event] -/// #[pallet::generate_deposit(pub(super) fn deposit_event)] -/// pub enum Event, I: 'static = ()> { -/// /// doc comment put in metadata -/// Proposed(::AccountId), -/// /// doc -/// Spending(BalanceOf), -/// Something(u32), -/// } -/// -/// #[pallet::type_value] -/// pub(super) fn MyDefault, I: 'static>() -> T::Balance { 3.into() } -/// -/// #[pallet::storage] -/// pub(super) type MyStorageValue, I: 'static = ()> = -/// StorageValue>; -/// -/// #[pallet::storage] -/// #[pallet::getter(fn my_storage)] -/// #[pallet::storage_prefix = "SomeOtherName"] -/// pub(super) type MyStorage = -/// StorageMap; -/// -/// #[pallet::genesis_config] -/// #[derive(frame_support::DefaultNoBound)] -/// pub struct GenesisConfig, I: 'static = ()> { -/// _config: sp_std::marker::PhantomData<(T,I)>, -/// _myfield: u32, -/// } -/// -/// #[pallet::genesis_build] -/// impl, I: 'static> BuildGenesisConfig for GenesisConfig { -/// fn build(&self) {} -/// } -/// -/// #[pallet::origin] -/// pub struct Origin(PhantomData<(T, I)>); -/// -/// #[pallet::composite_enum] -/// pub enum HoldReason { -/// SomeHoldReason -/// } -/// -/// #[pallet::validate_unsigned] -/// impl, I: 'static> ValidateUnsigned for Pallet { -/// type Call = Call; -/// fn validate_unsigned( -/// source: TransactionSource, -/// call: &Self::Call -/// ) -> TransactionValidity { -/// Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) -/// } -/// } -/// -/// #[pallet::inherent] -/// impl, I: 'static> ProvideInherent for Pallet { -/// type Call = Call; -/// type Error = InherentError; -/// -/// const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; -/// -/// fn create_inherent(_data: &InherentData) -> Option { -/// unimplemented!(); -/// } -/// -/// fn is_inherent(_call: &Self::Call) -> bool { -/// unimplemented!(); -/// } -/// } -/// -/// // Regular rust code needed for implementing ProvideInherent trait -/// -/// #[derive(codec::Encode, sp_runtime::RuntimeDebug)] -/// #[cfg_attr(feature = "std", derive(codec::Decode))] -/// pub enum InherentError { -/// } -/// -/// impl sp_inherents::IsFatalError for InherentError { -/// fn is_fatal_error(&self) -> bool { -/// unimplemented!(); -/// } -/// } -/// -/// pub const INHERENT_IDENTIFIER: sp_inherents::InherentIdentifier = *b"testpall"; -/// } -/// ``` +/// Note that the `dev_mode` argument can only be supplied to the `#[pallet]` or +/// `#[frame_support::pallet]` attribute macro that encloses your pallet module. 
This +/// argument cannot be specified anywhere else, including but not limited to the +/// `#[pallet::pallet]` attribute macro. /// -/// # Upgrade guidelines -/// -/// 1. Export the metadata of the pallet for later checks -/// - run your node with the pallet active -/// - query the metadata using the `state_getMetadata` RPC and curl, or use `subsee -p -/// > meta.json` -/// 2. Generate the template upgrade for the pallet provided by `decl_storage` with the -/// environment variable `PRINT_PALLET_UPGRADE`: `PRINT_PALLET_UPGRADE=1 cargo check -p -/// my_pallet`. This template can be used as it contains all information for storages, -/// genesis config and genesis build. -/// 3. Reorganize the pallet to have the trait `Config`, `decl_*` macros, -/// [`ValidateUnsigned`](`pallet_prelude::ValidateUnsigned`), -/// [`ProvideInherent`](`pallet_prelude::ProvideInherent`), and Origin` all together in one -/// file. Suggested order: -/// * `Config`, -/// * `decl_module`, -/// * `decl_event`, -/// * `decl_error`, -/// * `decl_storage`, -/// * `origin`, -/// * `validate_unsigned`, -/// * `provide_inherent`, so far it should compile and all be correct. -/// 4. start writing the new pallet module -/// ```ignore -/// pub use pallet::*; -/// -/// #[frame_support::pallet] -/// pub mod pallet { -/// use frame_support::pallet_prelude::*; -/// use frame_system::pallet_prelude::*; -/// use super::*; -/// -/// #[pallet::pallet] -/// #[pallet::generate_store($visibility_of_trait_store trait Store)] -/// // NOTE: if the visibility of trait store is private but you want to make it available -/// // in super, then use `pub(super)` or `pub(crate)` to make it available in crate. -/// pub struct Pallet(_); -/// // pub struct Pallet(PhantomData); // for instantiable pallet -/// } -/// ``` -/// 5. **migrate Config**: move trait into the module with -/// * all const in `decl_module` to [`#[pallet::constant]`](#palletconstant) -/// * add the bound `IsType<::RuntimeEvent>` to `type -/// RuntimeEvent` -/// 7. **migrate decl_module**: write: -/// ```ignore -/// #[pallet::hooks] -/// impl Hooks for Pallet { -/// } -/// ``` -/// and write inside `on_initialize`, `on_finalize`, `on_runtime_upgrade`, -/// `offchain_worker`, and `integrity_test`. -/// -/// then write: -/// ```ignore -/// #[pallet::call] -/// impl Pallet { -/// } -/// ``` -/// and write inside all the calls in `decl_module` with a few changes in the signature: -/// - origin must now be written completely, e.g. `origin: OriginFor` -/// - result type must be `DispatchResultWithPostInfo`, you need to write it and also you -/// might need to put `Ok(().into())` at the end or the function. -/// - `#[compact]` must now be written -/// [`#[pallet::compact]`](#palletcompact-some_arg-some_type) -/// - `#[weight = ..]` must now be written [`#[pallet::weight(..)]`](#palletweightexpr) -/// -/// 7. **migrate event**: rewrite as a simple enum with the attribute -/// [`#[pallet::event]`](#event-palletevent-optional), use [`#[pallet::generate_deposit($vis -/// fn deposit_event)]`](#event-palletevent-optional) to generate `deposit_event`, -/// 8. **migrate error**: rewrite it with attribute -/// [`#[pallet::error]`](#error-palleterror-optional). -/// 9. **migrate storage**: `decl_storage` provide an upgrade template (see 3.). All storages, -/// genesis config, genesis build and default implementation of genesis config can be taken -/// from it directly. -/// -/// Otherwise here is the manual process: -/// -/// first migrate the genesis logic. 
write: -/// ```ignore -/// #[pallet::genesis_config] -/// struct GenesisConfig { -/// // fields of add_extra_genesis -/// } -/// impl Default for GenesisConfig { -/// // type default or default provided for fields -/// } -/// #[pallet::genesis_build] -/// impl GenesisBuild for GenesisConfig { -/// // for instantiable pallet: -/// // `impl GenesisBuild for GenesisConfig { -/// fn build() { -/// // The add_extra_genesis build logic -/// } -/// } -/// ``` -/// for each storage, if it contains `config(..)` then add fields, and make it default to -/// the value in `= ..;` or the type default if none, if it contains no build then also add -/// the logic to build the value. for each storage if it contains `build(..)` then add the -/// logic to `genesis_build`. -/// -/// NOTE: within `decl_storage`: the individual config is executed first, followed by the -/// build and finally the `add_extra_genesis` build. -/// -/// Once this is done you can migrate storages individually, a few notes: -/// - for private storage use `pub(crate) type ` or `pub(super) type` or nothing, -/// - for storages with `get(fn ..)` use [`#[pallet::getter(fn -/// ...)]`](#palletgetterfn-my_getter_fn_name-optional) -/// - for storages with value being `Option<$something>` make generic `Value` being -/// `$something` and generic `QueryKind` being `OptionQuery` (note: this is default). -/// Otherwise make `Value` the complete value type and `QueryKind` being `ValueQuery`. -/// - for storages with default value: `= $expr;` provide some specific `OnEmpty` generic. -/// To do so use of `#[pallet::type_value]` to generate the wanted struct to put. -/// example: `MyStorage: u32 = 3u32` would be written: -/// -/// ```ignore -/// #[pallet::type_value] fn MyStorageOnEmpty() -> u32 { 3u32 } -/// #[pallet::storage] -/// pub(super) type MyStorage = StorageValue<_, u32, ValueQuery, MyStorageOnEmpty>; -/// ``` -/// -/// NOTE: `decl_storage` also generates the functions `assimilate_storage` and -/// `build_storage` directly on `GenesisConfig`, and these are sometimes used in tests. -/// In order not to break they can be implemented manually, one can implement those -/// functions by calling the `GenesisBuild` implementation. -/// 10. **migrate origin**: move the origin to the pallet module to be under a -/// [`#[pallet::origin]`](#origin-palletorigin-optional) attribute -/// 11. **migrate validate_unsigned**: move the -/// [`ValidateUnsigned`](`pallet_prelude::ValidateUnsigned`) implementation to the pallet -/// module under a -/// [`#[pallet::validate_unsigned]`](#validate-unsigned-palletvalidate_unsigned-optional) -/// attribute -/// 12. **migrate provide_inherent**: move the -/// [`ProvideInherent`](`pallet_prelude::ProvideInherent`) implementation to the pallet -/// module under a [`#[pallet::inherent]`](#inherent-palletinherent-optional) attribute -/// 13. rename the usage of `Module` to `Pallet` inside the crate. -/// 14. migration is done, now double check the migration with the checking migration -/// guidelines shown below. -/// -/// # Checking upgrade guidelines: -/// -/// * compare metadata. Use [subsee](https://github.com/ascjones/subsee) to fetch the metadata -/// and do a diff of the resulting json before and after migration. 
This checks for: -/// * call, names, signature, docs -/// * event names, docs -/// * error names, docs -/// * storage names, hasher, prefixes, default value -/// * error, error, constant -/// * manually check that: -/// * `Origin` was moved inside the macro under -/// [`#[pallet::origin]`](#origin-palletorigin-optional) if it exists -/// * [`ValidateUnsigned`](`pallet_prelude::ValidateUnsigned`) was moved inside the macro -/// under -/// [`#[pallet::validate_unsigned)]`](#validate-unsigned-palletvalidate_unsigned-optional) -/// if it exists -/// * [`ProvideInherent`](`pallet_prelude::ProvideInherent`) was moved inside the macro -/// under [`#[pallet::inherent)]`](#inherent-palletinherent-optional) if it exists -/// * `on_initialize` / `on_finalize` / `on_runtime_upgrade` / `offchain_worker` were moved -/// to the `Hooks` implementation -/// * storages with `config(..)` were converted to `GenesisConfig` field, and their default -/// is `= $expr;` if the storage has a default value -/// * storages with `build($expr)` or `config(..)` were built in `GenesisBuild::build` -/// * `add_extra_genesis` fields were converted to `GenesisConfig` field with their correct -/// default if specified -/// * `add_extra_genesis` build was written into `GenesisBuild::build` -/// * storage items defined with [`pallet`] use the name of the pallet provided by -/// [`traits::PalletInfo::name`] as `pallet_prefix` (in `decl_storage`, storage items used -/// the `pallet_prefix` given as input of `decl_storage` with the syntax `as Example`). Thus -/// a runtime using the pallet must be careful with this change. To handle this change: -/// * either ensure that the name of the pallet given to `construct_runtime!` is the same -/// as the name the pallet was giving to `decl_storage`, -/// * or do a storage migration from the old prefix used to the new prefix used. -/// -/// NOTE: The prefixes used by storage items are in metadata. Thus, ensuring the metadata -/// hasn't changed ensures that the `pallet_prefix`s used by the storage items haven't changed. -/// -/// # Notes when macro fails to show proper error message spans: -/// -/// Rustc loses span for some macro input. Some tips to fix it: -/// * do not use inner attribute: -/// ```ignore -/// #[pallet] -/// pub mod pallet { -/// //! This inner attribute will make span fail -/// .. -/// } -/// ``` -/// * use the newest nightly possible. +///
+/// WARNING:
+/// You should not deploy or use dev mode pallets in production. Doing so can break your
+/// chain and therefore should never be done. Once you are done tinkering, you should
+/// remove the 'dev_mode' argument from your #[pallet] declaration and fix any compile
+/// errors before attempting to use your pallet in a production scenario.
+///
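+/// As an illustration, here is a minimal sketch of a pallet using `dev_mode` (the call and
+/// storage names below are hypothetical and only serve to show what can be omitted):
+///
+/// ```ignore
+/// #[frame_support::pallet(dev_mode)]
+/// pub mod pallet {
+///     use frame_support::pallet_prelude::*;
+///     use frame_system::pallet_prelude::*;
+///
+///     #[pallet::pallet]
+///     pub struct Pallet<T>(_);
+///
+///     #[pallet::config]
+///     pub trait Config: frame_system::Config {}
+///
+///     #[pallet::call]
+///     impl<T: Config> Pallet<T> {
+///         // No `#[pallet::weight]` or `#[pallet::call_index]` needed in dev mode.
+///         pub fn do_something(origin: OriginFor<T>, value: u32) -> DispatchResult {
+///             let _who = ensure_signed(origin)?;
+///             Something::<T>::put(value);
+///             Ok(())
+///         }
+///     }
+///
+///     // Unbounded by default; the `_` hasher resolves to `Blake2_128Concat` in dev mode.
+///     #[pallet::storage]
+///     pub type SomeMap<T> = StorageMap<_, _, u32, u32>;
+///
+///     #[pallet::storage]
+///     pub type Something<T> = StorageValue<_, u32>;
+/// }
+/// ```
+///
+/// Outside of dev mode, each of the elements above would require explicit weight, call index,
+/// hasher and bounded-encoding annotations.
+///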
pub use frame_support_procedural::pallet; -/// Contains macro stubs for all of the pallet:: macros -pub mod pallet_macros { - pub use frame_support_procedural::{ - composite_enum, config, disable_frame_system_supertrait_check, error, event, - extra_constants, feeless_if, generate_deposit, generate_store, getter, hooks, - import_section, inherent, no_default, no_default_bounds, origin, pallet_section, - storage_prefix, storage_version, type_value, unbounded, validate_unsigned, weight, - whitelist_storage, - }; +/// Contains macro stubs for all of the `pallet::` macros +pub mod pallet_macros { + /// Declare the storage as whitelisted from benchmarking. + /// + /// Doing so will exclude reads of that value's storage key from counting towards weight + /// calculations during benchmarking. + /// + /// This attribute should only be attached to storages that are known to be + /// read/used in every block. This will result in a more accurate benchmarking weight. + /// + /// ### Example + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::storage] + /// #[pallet::whitelist_storage] + /// pub type MyStorage = StorageValue<_, u32>; + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + pub use frame_support_procedural::whitelist_storage; + + /// Allows specifying the weight of a call. + /// + /// Each dispatchable needs to define a weight with the `#[pallet::weight($expr)]` + /// attribute. The first argument must be `origin: OriginFor`. + /// + /// ## Example + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # use frame_system::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::call] + /// impl Pallet { + /// #[pallet::weight({0})] // <- set actual weight here + /// #[pallet::call_index(0)] + /// pub fn something( + /// _: OriginFor, + /// foo: u32, + /// ) -> DispatchResult { + /// unimplemented!() + /// } + /// } + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + pub use frame_support_procedural::weight; + + /// Allows whitelisting a storage item from decoding during try-runtime checks. + /// + /// The optional attribute `#[pallet::disable_try_decode_storage]` will declare the + /// storage as whitelisted from decoding during try-runtime checks. This should only be + /// attached to transient storage which cannot be migrated during runtime upgrades. + /// + /// ### Example + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::storage] + /// #[pallet::disable_try_decode_storage] + /// pub type MyStorage = StorageValue<_, u32>; + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + pub use frame_support_procedural::disable_try_decode_storage; + + /// Declares a storage as unbounded in potential size. + /// + /// When implementating the storage info (when `#[pallet::generate_storage_info]` is + /// specified on the pallet struct placeholder), the size of the storage will be declared + /// as unbounded. This can be useful for storage which can never go into PoV (Proof of + /// Validity). 
+ /// + /// ## Example + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::storage] + /// #[pallet::unbounded] + /// pub type MyStorage = StorageValue<_, u32>; + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + pub use frame_support_procedural::unbounded; + + /// Defines what storage prefix to use for a storage item when building the trie. + /// + /// This is helpful if you wish to rename the storage field but don't want to perform a + /// migration. + /// + /// ## Example + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::storage] + /// #[pallet::storage_prefix = "foo"] + /// pub type MyStorage = StorageValue<_, u32>; + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + pub use frame_support_procedural::storage_prefix; + + /// Ensures the generated `DefaultConfig` will not have any bounds for + /// that trait item. + /// + /// Attaching this attribute to a trait item ensures that the generated trait + /// `DefaultConfig` will not have any bounds for this trait item. + /// + /// As an example, if you have a trait item `type AccountId: SomeTrait;` in your `Config` + /// trait, the generated `DefaultConfig` will only have `type AccountId;` with no trait + /// bound. + pub use frame_support_procedural::no_default_bounds; + + /// Ensures the trait item will not be used as a default with the + /// `#[derive_impl(..)]` attribute macro. + /// + /// The optional attribute `#[pallet::no_default]` can be attached to trait items within a + /// `Config` trait impl that has [`#[pallet::config(with_default)]`](`config`) + /// attached. + pub use frame_support_procedural::no_default; + + /// Declares a module as importable into a pallet via + /// [`#[import_section]`](`import_section`). + /// + /// Note that sections are imported by their module name/ident, and should be referred to + /// by their _full path_ from the perspective of the target pallet. Do not attempt to make + /// use of `use` statements to bring pallet sections into scope, as this will not work + /// (unless you do so as part of a wildcard import, in which case it will work). + /// + /// ## Naming Logistics + /// + /// Also note that because of how `#[pallet_section]` works, pallet section names must be + /// globally unique _within the crate in which they are defined_. For more information on + /// why this must be the case, see macro_magic's + /// [`#[export_tokens]`](https://docs.rs/macro_magic/latest/macro_magic/attr.export_tokens.html) macro. + /// + /// Optionally, you may provide an argument to `#[pallet_section]` such as + /// `#[pallet_section(some_ident)]`, in the event that there is another pallet section in + /// same crate with the same ident/name. The ident you specify can then be used instead of + /// the module's ident name when you go to import it via + /// [`#[import_section]`](`import_section`). + pub use frame_support_procedural::pallet_section; + + /// The `#[pallet::inherent]` attribute allows the pallet to provide + /// [inherents](https://docs.substrate.io/fundamentals/transaction-types/#inherent-transactions). 
+ /// + /// An inherent is some piece of data that is inserted by a block authoring node at block + /// creation time and can either be accepted or rejected by validators based on whether the + /// data falls within an acceptable range. + /// + /// The most common inherent is the `timestamp` that is inserted into every block. Since + /// there is no way to validate timestamps, validators simply check that the timestamp + /// reported by the block authoring node falls within an acceptable range. + /// + /// Example usage: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # use frame_support::inherent::IsFatalError; + /// # use sp_timestamp::InherentError; + /// # use sp_std::result; + /// # + /// // Example inherent identifier + /// pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"timstap0"; + /// + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::inherent] + /// impl ProvideInherent for Pallet { + /// type Call = Call; + /// type Error = InherentError; + /// const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; + /// + /// fn create_inherent(data: &InherentData) -> Option { + /// unimplemented!() + /// } + /// + /// fn check_inherent( + /// call: &Self::Call, + /// data: &InherentData, + /// ) -> result::Result<(), Self::Error> { + /// unimplemented!() + /// } + /// + /// fn is_inherent(call: &Self::Call) -> bool { + /// unimplemented!() + /// } + /// } + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + /// + /// I.e. a trait implementation with bound `T: Config`, of trait `ProvideInherent` for type + /// `Pallet`, and some optional where clause. + /// + /// ## Macro expansion + /// + /// The macro currently makes no use of this information, but it might use this information + /// in the future to give information directly to `construct_runtime`. + pub use frame_support_procedural::inherent; + + /// Splits a pallet declaration into multiple parts. + /// + /// An attribute macro that can be attached to a module declaration. Doing so will + /// import the contents of the specified external pallet section that is defined + /// elsewhere using [`#[pallet_section]`](`pallet_section`). + /// + /// ## Example + /// ``` + /// # use frame_support::pallet_macros::pallet_section; + /// # use frame_support::pallet_macros::import_section; + /// # + /// /// A [`pallet_section`] that defines the events for a pallet. + /// /// This can later be imported into the pallet using [`import_section`]. + /// #[pallet_section] + /// mod events { + /// #[pallet::event] + /// #[pallet::generate_deposit(pub(super) fn deposit_event)] + /// pub enum Event { + /// /// Event documentation should end with an array that provides descriptive names for event + /// /// parameters. [something, who] + /// SomethingStored { something: u32, who: T::AccountId }, + /// } + /// } + /// + /// #[import_section(events)] + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config { + /// # type RuntimeEvent: From> + IsType<::RuntimeEvent>; + /// # } + /// } + /// ``` + /// + /// This will result in the contents of `some_section` being _verbatim_ imported into + /// the pallet above. 
Note that since the tokens for `some_section` are essentially + /// copy-pasted into the target pallet, you cannot refer to imports that don't also + /// exist in the target pallet, but this is easily resolved by including all relevant + /// `use` statements within your pallet section, so they are imported as well, or by + /// otherwise ensuring that you have the same imports on the target pallet. + /// + /// It is perfectly permissible to import multiple pallet sections into the same pallet, + /// which can be done by having multiple `#[import_section(something)]` attributes + /// attached to the pallet. + /// + /// Note that sections are imported by their module name/ident, and should be referred to + /// by their _full path_ from the perspective of the target pallet. + pub use frame_support_procedural::import_section; + + /// Allows defining getter functions on `Pallet` storage. + /// + /// ## Example + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::storage] + /// #[pallet::getter(fn my_getter_fn_name)] + /// pub type MyStorage = StorageValue<_, u32>; + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + /// + /// See [`pallet::storage`](`frame_support::pallet_macros::storage`) for more info. + pub use frame_support_procedural::getter; + + /// Allows generating the `Store` trait for all storages. + /// + /// DEPRECATED: Will be removed, do not use. + /// See for more details. + /// + /// To generate a `Store` trait associating all storages, annotate your `Pallet` struct + /// with the attribute `#[pallet::generate_store($vis trait Store)]`, e.g.: + /// + /// ```ignore + /// #[pallet::pallet] + /// #[pallet::generate_store(pub(super) trait Store)] + /// pub struct Pallet(_); + /// ``` + /// More precisely, the `Store` trait contains an associated type for each storage. It is + /// implemented for `Pallet` allowing access to the storage from pallet struct. + /// + /// Thus when defining a storage named `Foo`, it can later be accessed from `Pallet` using + /// `::Foo`. + pub use frame_support_procedural::generate_store; + + /// Defines constants that are added to the constant field of + /// [`PalletMetadata`](frame_metadata::v15::PalletMetadata) struct for this pallet. + /// + /// Must be defined like: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// # + /// #[pallet::extra_constants] + /// impl Pallet // $optional_where_clause + /// { + /// #[pallet::constant_name(SomeU32ConstantName)] + /// /// Some doc + /// fn some_u32_constant() -> u32 { + /// 100u32 + /// } + /// } + /// } + /// ``` + /// + /// I.e. a regular rust `impl` block with some optional where clause and functions with 0 + /// args, 0 generics, and some return type. + pub use frame_support_procedural::extra_constants; + + #[rustfmt::skip] + /// Allows bypassing the `frame_system::Config` supertrait check. + /// + /// To bypass the syntactic `frame_system::Config` supertrait check, use the attribute + /// `pallet::disable_frame_system_supertrait_check`. + /// + /// Note this bypass is purely syntactic, and does not actually remove the requirement that your + /// pallet implements `frame_system::Config`. 
When using this check, your config is still required to implement + /// `frame_system::Config` either via + /// - Implementing a trait that itself implements `frame_system::Config` + /// - Tightly coupling it with another pallet which itself implements `frame_system::Config` + /// + /// e.g. + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # use frame_system::pallet_prelude::*; + /// trait OtherTrait: frame_system::Config {} + /// + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::config] + /// #[pallet::disable_frame_system_supertrait_check] + /// pub trait Config: OtherTrait {} + /// } + /// ``` + /// + /// To learn more about supertraits, see the + /// [trait_based_programming](../../polkadot_sdk_docs/reference_docs/trait_based_programming/index.html) + /// reference doc. + pub use frame_support_procedural::disable_frame_system_supertrait_check; + + /// The mandatory attribute allowing definition of configurable types for the pallet. + /// + /// Item must be defined as: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::config] + /// pub trait Config: frame_system::Config // + $optionally_some_other_supertraits + /// // $optional_where_clause + /// { + /// // config items here + /// } + /// } + /// ``` + /// + /// I.e. a regular trait definition named `Config`, with the supertrait + /// [`frame_system::pallet::Config`](../../frame_system/pallet/trait.Config.html), and + /// optionally other supertraits and a where clause. (Specifying other supertraits here is + /// known as [tight coupling](https://docs.substrate.io/reference/how-to-guides/pallet-design/use-tight-coupling/)) + /// + /// The associated type `RuntimeEvent` is reserved. If defined, it must have the bounds + /// `From` and `IsType<::RuntimeEvent>`. + /// + /// [`#[pallet::event]`](`event`) must be present if `RuntimeEvent` + /// exists as a config item in your `#[pallet::config]`. + /// + /// ## Optional: `with_default` + /// + /// An optional `with_default` argument may also be specified. Doing so will automatically + /// generate a `DefaultConfig` trait inside your pallet which is suitable for use with + /// [`#[derive_impl(..)`](`frame_support::derive_impl`) to derive a default testing + /// config: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # use frame_system::pallet_prelude::*; + /// # use core::fmt::Debug; + /// # use frame_support::traits::Contains; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::config(with_default)] // <- with_default is optional + /// pub trait Config: frame_system::Config { + /// /// The overarching event type. + /// #[pallet::no_default_bounds] // Default is not supported for RuntimeEvent + /// type RuntimeEvent: From> + IsType<::RuntimeEvent>; + /// + /// // ...other config items get default + /// } + /// + /// #[pallet::event] + /// pub enum Event { + /// SomeEvent(u16, u32), + /// } + /// } + /// ``` + /// + /// As shown above, you may also attach the [`#[pallet::no_default]`](`no_default`) + /// attribute to specify that a particular trait item _cannot_ be used as a default when a + /// test `Config` is derived using the [`#[derive_impl(..)]`](`frame_support::derive_impl`) + /// attribute macro. 
This will cause that particular trait item to simply not appear in + /// default testing configs based on this config (the trait item will not be included in + /// `DefaultConfig`). + /// + /// ### `DefaultConfig` Caveats + /// + /// The auto-generated `DefaultConfig` trait: + /// - is always a _subset_ of your pallet's `Config` trait. + /// - can only contain items that don't rely on externalities, such as + /// `frame_system::Config`. + /// + /// Trait items that _do_ rely on externalities should be marked with + /// [`#[pallet::no_default]`](`no_default`) + /// + /// Consequently: + /// - Any items that rely on externalities _must_ be marked with + /// [`#[pallet::no_default]`](`no_default`) or your trait will fail to compile when used + /// with [`derive_impl`](`frame_support::derive_impl`). + /// - Items marked with [`#[pallet::no_default]`](`no_default`) are entirely excluded from + /// the `DefaultConfig` trait, and therefore any impl of `DefaultConfig` doesn't need to + /// implement such items. + /// + /// For more information, see [`frame_support::derive_impl`]. + pub use frame_support_procedural::config; + + /// Allows defining an enum that gets composed as an aggregate enum by `construct_runtime`. + /// + /// The `#[pallet::composite_enum]` attribute allows you to define an enum that gets + /// composed as an aggregate enum by `construct_runtime`. This is similar in principle with + /// [frame_support_procedural::event] and [frame_support_procedural::error]. + /// + /// The attribute currently only supports enum definitions, and identifiers that are named + /// `FreezeReason`, `HoldReason`, `LockId` or `SlashReason`. Arbitrary identifiers for the + /// enum are not supported. The aggregate enum generated by + /// [`frame_support::construct_runtime`](frame_support::construct_runtime) will have the + /// name of `RuntimeFreezeReason`, `RuntimeHoldReason`, `RuntimeLockId` and + /// `RuntimeSlashReason` respectively. + /// + /// NOTE: The aggregate enum generated by `construct_runtime` generates a conversion + /// function from the pallet enum to the aggregate enum, and automatically derives the + /// following traits: + /// + /// ```ignore + /// Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, MaxEncodedLen, TypeInfo, + /// RuntimeDebug + /// ``` + /// + /// For ease of usage, when no `#[derive]` attributes are found for the enum under + /// [`#[pallet::composite_enum]`](composite_enum), the aforementioned traits are + /// automatically derived for it. The inverse is also true: if there are any `#[derive]` + /// attributes found for the enum, then no traits will automatically be derived for it. + /// + /// e.g, defining `HoldReason` in a pallet + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::composite_enum] + /// pub enum HoldReason { + /// /// The NIS Pallet has reserved it for a non-fungible receipt. + /// #[codec(index = 0)] + /// SomeHoldReason, + /// #[codec(index = 1)] + /// SomeOtherHoldReason, + /// } + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + pub use frame_support_procedural::composite_enum; + + /// Allows the pallet to validate unsigned transactions. 
+ /// + /// Item must be defined as: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::validate_unsigned] + /// impl sp_runtime::traits::ValidateUnsigned for Pallet { + /// type Call = Call; + /// + /// fn validate_unsigned(_source: TransactionSource, _call: &Self::Call) -> TransactionValidity { + /// // Your implementation details here + /// unimplemented!() + /// } + /// } + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + /// + /// I.e. a trait implementation with bound `T: Config`, of trait + /// [`ValidateUnsigned`](frame_support::pallet_prelude::ValidateUnsigned) for + /// type `Pallet`, and some optional where clause. + /// + /// NOTE: There is also the [`sp_runtime::traits::TransactionExtension`] trait that can be + /// used to add some specific logic for transaction validation. + /// + /// ## Macro expansion + /// + /// The macro currently makes no use of this information, but it might use this information + /// in the future to give information directly to [`frame_support::construct_runtime`]. + pub use frame_support_procedural::validate_unsigned; + + /// Allows defining a struct implementing the [`Get`](frame_support::traits::Get) trait to + /// ease the use of storage types. + /// + /// This attribute is meant to be used alongside [`#[pallet::storage]`](`storage`) to + /// define a storage's default value. This attribute can be used multiple times. + /// + /// Item must be defined as: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use sp_runtime::FixedU128; + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::storage] + /// pub(super) type SomeStorage = + /// StorageValue<_, FixedU128, ValueQuery, DefaultForSomeValue>; + /// + /// // Define default for ParachainId + /// #[pallet::type_value] + /// pub fn DefaultForSomeValue() -> FixedU128 { + /// FixedU128::from_u32(1) + /// } + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + /// + /// ## Macro expansion + /// + /// The macro renames the function to some internal name, generates a struct with the + /// original name of the function and its generic, and implements `Get<$ReturnType>` by + /// calling the user defined function. + pub use frame_support_procedural::type_value; + + /// Allows defining a storage version for the pallet. + /// + /// Because the `pallet::pallet` macro implements + /// [`GetStorageVersion`](frame_support::traits::GetStorageVersion), the current storage + /// version needs to be communicated to the macro. This can be done by using the + /// `pallet::storage_version` attribute: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::StorageVersion; + /// # use frame_support::traits::GetStorageVersion; + /// # + /// const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); + /// + /// #[pallet::pallet] + /// #[pallet::storage_version(STORAGE_VERSION)] + /// pub struct Pallet(_); + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + /// + /// If not present, the current storage version is set to the default value. 
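+ /// As a usage sketch (the `on_runtime_upgrade` body below is hypothetical and not part of
+ /// this crate), the in-code version declared this way is typically compared against the
+ /// on-chain version before running a migration:
+ ///
+ /// ```ignore
+ /// fn on_runtime_upgrade() -> Weight {
+ ///     // `STORAGE_VERSION` is the in-code version declared on the pallet struct.
+ ///     if Pallet::<T>::on_chain_storage_version() < STORAGE_VERSION {
+ ///         // ... migrate the pallet's storage to the new layout ...
+ ///         STORAGE_VERSION.put::<Pallet<T>>();
+ ///     }
+ ///     Weight::zero()
+ /// }
+ /// ```
+ ///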
+ pub use frame_support_procedural::storage_version; + + /// The `#[pallet::hooks]` attribute allows you to specify a + /// [`frame_support::traits::Hooks`] implementation for `Pallet` that specifies + /// pallet-specific logic. + /// + /// The item the attribute attaches to must be defined as follows: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # use frame_system::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::hooks] + /// impl Hooks> for Pallet { + /// // Implement hooks here + /// } + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + /// I.e. a regular trait implementation with generic bound: `T: Config`, for the trait + /// `Hooks>` (they are defined in preludes), for the type `Pallet`. + /// + /// Optionally, you could add a where clause. + /// + /// ## Macro expansion + /// + /// The macro implements the traits + /// [`OnInitialize`](frame_support::traits::OnInitialize), + /// [`OnIdle`](frame_support::traits::OnIdle), + /// [`OnFinalize`](frame_support::traits::OnFinalize), + /// [`OnRuntimeUpgrade`](frame_support::traits::OnRuntimeUpgrade), + /// [`OffchainWorker`](frame_support::traits::OffchainWorker), and + /// [`IntegrityTest`](frame_support::traits::IntegrityTest) using + /// the provided [`Hooks`](frame_support::traits::Hooks) implementation. + /// + /// NOTE: `OnRuntimeUpgrade` is implemented with `Hooks::on_runtime_upgrade` and some + /// additional logic. E.g. logic to write the pallet version into storage. + /// + /// NOTE: The macro also adds some tracing logic when implementing the above traits. The + /// following hooks emit traces: `on_initialize`, `on_finalize` and `on_runtime_upgrade`. + pub use frame_support_procedural::hooks; + + /// Generates a helper function on `Pallet` that handles deposit events. + /// + /// NOTE: For instantiable pallets, the event must be generic over `T` and `I`. + /// + /// ## Macro expansion + /// + /// The macro will add on enum `Event` the attributes: + /// * `#[derive(`[`frame_support::CloneNoBound`]`)]` + /// * `#[derive(`[`frame_support::EqNoBound`]`)]` + /// * `#[derive(`[`frame_support::PartialEqNoBound`]`)]` + /// * `#[derive(`[`frame_support::RuntimeDebugNoBound`]`)]` + /// * `#[derive(`[`codec::Encode`]`)]` + /// * `#[derive(`[`codec::Decode`]`)]` + /// + /// The macro implements `From>` for (). + /// + /// The macro implements a metadata function on `Event` returning the `EventMetadata`. + /// + /// If `#[pallet::generate_deposit]` is present then the macro implements `fn + /// deposit_event` on `Pallet`. + pub use frame_support_procedural::generate_deposit; + + /// Allows defining logic to make an extrinsic call feeless. + /// + /// Each dispatchable may be annotated with the `#[pallet::feeless_if($closure)]` + /// attribute, which explicitly defines the condition for the dispatchable to be feeless. + /// + /// The arguments for the closure must be the referenced arguments of the dispatchable + /// function. + /// + /// The closure must return `bool`. + /// + /// ### Example + /// + /// ``` + /// #[frame_support::pallet(dev_mode)] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # use frame_system::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::call] + /// impl Pallet { + /// #[pallet::call_index(0)] + /// /// Marks this call as feeless if `foo` is zero. 
+ /// #[pallet::feeless_if(|_origin: &OriginFor, foo: &u32| -> bool { + /// *foo == 0 + /// })] + /// pub fn something( + /// _: OriginFor, + /// foo: u32, + /// ) -> DispatchResult { + /// unimplemented!() + /// } + /// } + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + /// + /// Please note that this only works for signed dispatchables and requires a signed + /// extension such as [`pallet_skip_feeless_payment::SkipCheckIfFeeless`] to wrap the + /// existing payment extension. Else, this is completely ignored and the dispatchable is + /// still charged. + /// + /// ### Macro expansion + /// + /// The macro implements the [`pallet_skip_feeless_payment::CheckIfFeeless`] trait on the + /// dispatchable and calls the corresponding closure in the implementation. + /// + /// [`pallet_skip_feeless_payment::SkipCheckIfFeeless`]: ../../pallet_skip_feeless_payment/struct.SkipCheckIfFeeless.html + /// [`pallet_skip_feeless_payment::CheckIfFeeless`]: ../../pallet_skip_feeless_payment/struct.SkipCheckIfFeeless.html + pub use frame_support_procedural::feeless_if; + + /// Allows defining an error enum that will be returned from the dispatchable when an error + /// occurs. + /// + /// The information for this error type is then stored in runtime metadata. + /// + /// Item must be defined as so: + /// + /// ``` + /// #[frame_support::pallet(dev_mode)] + /// mod pallet { + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::error] + /// pub enum Error { + /// /// SomeFieldLessVariant doc + /// SomeFieldLessVariant, + /// /// SomeVariantWithOneField doc + /// SomeVariantWithOneField(u32), + /// } + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + /// I.e. a regular enum named `Error`, with generic `T` and fieldless or multiple-field + /// variants. + /// + /// Any field type in the enum variants must implement [`scale_info::TypeInfo`] in order to + /// be properly used in the metadata, and its encoded size should be as small as possible, + /// preferably 1 byte in size in order to reduce storage size. The error enum itself has an + /// absolute maximum encoded size specified by + /// [`frame_support::MAX_MODULE_ERROR_ENCODED_SIZE`]. + /// + /// (1 byte can still be 256 different errors. The more specific the error, the easier it + /// is to diagnose problems and give a better experience to the user. Don't skimp on having + /// lots of individual error conditions.) + /// + /// Field types in enum variants must also implement [`frame_support::PalletError`], + /// otherwise the pallet will fail to compile. Rust primitive types have already + /// implemented the [`frame_support::PalletError`] trait along with some commonly used + /// stdlib types such as [`Option`] and [`sp_std::marker::PhantomData`], and hence + /// in most use cases, a manual implementation is not necessary and is discouraged. + /// + /// The generic `T` must not bound anything and a `where` clause is not allowed. That said, + /// bounds and/or a where clause should not needed for any use-case. + /// + /// ## Macro expansion + /// + /// The macro implements the [`Debug`] trait and functions `as_u8` using variant position, + /// and `as_str` using variant doc. + /// + /// The macro also implements `From>` for `&'static str` and `From>` for + /// `DispatchError`. + pub use frame_support_procedural::error; + + /// Allows defining pallet events. 
+ /// + /// Pallet events are stored under the `system` / `events` key when the block is applied + /// (and then replaced when the next block writes it's events). + /// + /// The Event enum can be defined as follows: + /// + /// ``` + /// #[frame_support::pallet(dev_mode)] + /// mod pallet { + /// # use frame_support::pallet_prelude::IsType; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::event] + /// #[pallet::generate_deposit(fn deposit_event)] // Optional + /// pub enum Event { + /// /// SomeEvent doc + /// SomeEvent(u16, u32), // SomeEvent with two fields + /// } + /// + /// #[pallet::config] + /// pub trait Config: frame_system::Config { + /// /// The overarching runtime event type. + /// type RuntimeEvent: From> + /// + IsType<::RuntimeEvent>; + /// } + /// } + /// ``` + /// + /// I.e. an enum (with named or unnamed fields variant), named `Event`, with generic: none + /// or `T` or `T: Config`, and optional w here clause. + /// + /// `RuntimeEvent` must be defined in the `Config`, as shown in the example. + /// + /// Each field must implement [`Clone`], [`Eq`], [`PartialEq`], [`codec::Encode`], + /// [`codec::Decode`], and [`Debug`] (on std only). For ease of use, bound by the trait + /// `Member`, available in [`frame_support::pallet_prelude`]. + pub use frame_support_procedural::event; - /// Allows a pallet to declare a set of functions as a *dispatchable extrinsic*. In - /// slightly simplified terms, this macro declares the set of "transactions" of a pallet. + /// Allows a pallet to declare a set of functions as a *dispatchable extrinsic*. + /// + /// In slightly simplified terms, this macro declares the set of "transactions" of a + /// pallet. /// /// > The exact definition of **extrinsic** can be found in /// > [`sp_runtime::generic::UncheckedExtrinsic`]. @@ -2382,14 +2005,24 @@ pub mod pallet_macros { /// If no `#[pallet::call]` exists, then a default implementation corresponding to the /// following code is automatically generated: /// - /// ```ignore - /// #[pallet::call] - /// impl Pallet {} + /// ``` + /// #[frame_support::pallet(dev_mode)] + /// mod pallet { + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::call] // <- automatically generated + /// impl Pallet {} // <- automatically generated + /// + /// #[pallet::config] + /// pub trait Config: frame_system::Config {} + /// } /// ``` pub use frame_support_procedural::call; - /// Enforce the index of a variant in the generated `enum Call`. See [`call`] for more - /// information. + /// Enforce the index of a variant in the generated `enum Call`. + /// + /// See [`call`] for more information. /// /// All call indexes start from 0, until it encounters a dispatchable function with a /// defined call index. The dispatchable function that lexically follows the function with @@ -2399,7 +2032,9 @@ pub mod pallet_macros { pub use frame_support_procedural::call_index; /// Declares the arguments of a [`call`] function to be encoded using - /// [`codec::Compact`]. This will results in smaller extrinsic encoding. + /// [`codec::Compact`]. + /// + /// This will results in smaller extrinsic encoding. /// /// A common example of `compact` is for numeric values that are often times far far away /// from their theoretical maximum. 
For example, in the context of a crypto-currency, the @@ -2495,8 +2130,8 @@ pub mod pallet_macros { /// ``` pub use frame_support_procedural::genesis_build; - /// The `#[pallet::constant]` attribute can be used to add an associated type trait bounded - /// by [`Get`](frame_support::pallet_prelude::Get) from [`pallet::config`](`macro@config`) + /// Allows adding an associated type trait bounded by + /// [`Get`](frame_support::pallet_prelude::Get) from [`pallet::config`](`macro@config`) /// into metadata. /// /// ## Example @@ -2517,9 +2152,10 @@ pub mod pallet_macros { /// ``` pub use frame_support_procedural::constant; - /// Declares a type alias as a storage item. Storage items are pointers to data stored - /// on-chain (the *blockchain state*), under a specific key. The exact key is dependent on - /// the type of the storage. + /// Declares a type alias as a storage item. + /// + /// Storage items are pointers to data stored on-chain (the *blockchain state*), under a + /// specific key. The exact key is dependent on the type of the storage. /// /// > From the perspective of this pallet, the entire blockchain state is abstracted behind /// > a key-value api, namely [`sp_io::storage`]. @@ -2699,6 +2335,8 @@ pub mod pallet_macros { /// * [`macro@getter`]: Creates a custom getter function. /// * [`macro@storage_prefix`]: Overrides the default prefix of the storage item. /// * [`macro@unbounded`]: Declares the storage item as unbounded. + /// * [`macro@disable_try_decode_storage`]: Declares that try-runtime checks should not + /// attempt to decode the storage item. /// /// #### Example /// ``` @@ -2714,10 +2352,14 @@ pub mod pallet_macros { /// #[pallet::getter(fn foo)] /// #[pallet::storage_prefix = "OtherFoo"] /// #[pallet::unbounded] + /// #[pallet::disable_try_decode_storage] /// pub type Foo = StorageValue<_, u32, ValueQuery>; /// } /// ``` pub use frame_support_procedural::storage; + + /// Allows defining conditions for a task to run. + /// /// This attribute is attached to a function inside an `impl` block annoated with /// [`pallet::tasks_experimental`](`tasks_experimental`) to define the conditions for a /// given work item to be valid. @@ -2726,6 +2368,9 @@ pub mod pallet_macros { /// should have the same signature as the function it is attached to, except that it should /// return a `bool` instead. pub use frame_support_procedural::task_condition; + + /// Allows defining an index for a task. + /// /// This attribute is attached to a function inside an `impl` block annoated with /// [`pallet::tasks_experimental`](`tasks_experimental`) to define the index of a given /// work item. @@ -2733,22 +2378,29 @@ pub mod pallet_macros { /// It takes an integer literal as input, which is then used to define the index. This /// index should be unique for each function in the `impl` block. pub use frame_support_procedural::task_index; + + /// Allows defining an iterator over available work items for a task. + /// /// This attribute is attached to a function inside an `impl` block annoated with - /// [`pallet::tasks_experimental`](`tasks_experimental`) to define an iterator over the - /// available work items for a task. + /// [`pallet::tasks_experimental`](`tasks_experimental`). /// /// It takes an iterator as input that yields a tuple with same types as the function /// arguments. pub use frame_support_procedural::task_list; + + /// Allows defining the weight of a task. 
+ /// /// This attribute is attached to a function inside an `impl` block annoated with /// [`pallet::tasks_experimental`](`tasks_experimental`) define the weight of a given work /// item. /// /// It takes a closure as input, which should return a `Weight` value. pub use frame_support_procedural::task_weight; + /// Allows you to define some service work that can be recognized by a script or an - /// off-chain worker. Such a script can then create and submit all such work items at any - /// given time. + /// off-chain worker. + /// + /// Such a script can then create and submit all such work items at any given time. /// /// These work items are defined as instances of the [`Task`](frame_support::traits::Task) /// trait. [`pallet:tasks_experimental`](`tasks_experimental`) when attached to an `impl` @@ -2773,6 +2425,64 @@ pub mod pallet_macros { /// Now, this can be executed as follows: #[doc = docify::embed!("src/tests/tasks.rs", tasks_work)] pub use frame_support_procedural::tasks_experimental; + + /// Allows a pallet to declare a type as an origin. + /// + /// If defined as such, this type will be amalgamated at the runtime level into + /// `RuntimeOrigin`, very similar to [`call`], [`error`] and [`event`]. See + /// [`composite_enum`] for similar cases. + /// + /// Origin is a complex FRAME topics and is further explained in `polkadot_sdk_docs`. + /// + /// ## Syntax Variants + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// # #[pallet::pallet] + /// # pub struct Pallet(_); + /// /// On the spot declaration. + /// #[pallet::origin] + /// #[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] + /// pub enum Origin { + /// Foo, + /// Bar, + /// } + /// } + /// ``` + /// + /// Or, more commonly used: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// # #[pallet::pallet] + /// # pub struct Pallet(_); + /// #[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] + /// pub enum RawOrigin { + /// Foo, + /// Bar, + /// } + /// + /// #[pallet::origin] + /// pub type Origin = RawOrigin; + /// } + /// ``` + /// + /// ## Warning + /// + /// Modifying any pallet's origin type will cause the runtime level origin type to also + /// change in encoding. If stored anywhere on-chain, this will require a data migration. + /// + /// Read more about origins at the [Origin Reference + /// Docs](../../polkadot_sdk_docs/reference_docs/frame_origin/index.html). + pub use frame_support_procedural::origin; } #[deprecated(note = "Will be removed after July 2023; Use `sp_runtime::traits` directly instead.")] diff --git a/substrate/frame/support/src/migrations.rs b/substrate/frame/support/src/migrations.rs index d059a992a8664cd8c3583f7f6fd1bff6af102770..2ceab44cb16bd4bcf356caa07a3506aa76ba79db 100644 --- a/substrate/frame/support/src/migrations.rs +++ b/substrate/frame/support/src/migrations.rs @@ -16,13 +16,21 @@ // limitations under the License. 
use crate::{ - traits::{GetStorageVersion, NoStorageVersionSet, PalletInfoAccess, StorageVersion}, - weights::{RuntimeDbWeight, Weight}, + defensive, + storage::transactional::with_transaction_opaque_err, + traits::{ + Defensive, GetStorageVersion, NoStorageVersionSet, PalletInfoAccess, SafeMode, + StorageVersion, + }, + weights::{RuntimeDbWeight, Weight, WeightMeter}, }; +use codec::{Decode, Encode, MaxEncodedLen}; use impl_trait_for_tuples::impl_for_tuples; +use sp_arithmetic::traits::Bounded; use sp_core::Get; use sp_io::{hashing::twox_128, storage::clear_prefix, KillStorageResult}; -use sp_std::marker::PhantomData; +use sp_runtime::traits::Zero; +use sp_std::{marker::PhantomData, vec::Vec}; /// Handles storage migration pallet versioning. /// @@ -43,12 +51,30 @@ use sp_std::marker::PhantomData; /// Otherwise, a warning is logged notifying the developer that the upgrade was a noop and should /// probably be removed. /// +/// It is STRONGLY RECOMMENDED to write the unversioned migration logic in a private module and +/// only export the versioned migration logic to prevent accidentally using the unversioned +/// migration in any runtimes. +/// /// ### Examples /// ```ignore /// // In file defining migrations -/// pub struct VersionUncheckedMigrateV5ToV6(sp_std::marker::PhantomData); -/// impl OnRuntimeUpgrade for VersionUncheckedMigrateV5ToV6 { -/// // OnRuntimeUpgrade implementation... +/// +/// /// Private module containing *version unchecked* migration logic. +/// /// +/// /// Should only be used by the [`VersionedMigration`] type in this module to create something to +/// /// export. +/// /// +/// /// We keep this private so the unversioned migration cannot accidentally be used in any runtimes. +/// /// +/// /// For more about this pattern of keeping items private, see +/// /// - https://github.com/rust-lang/rust/issues/30905 +/// /// - https://internals.rust-lang.org/t/lang-team-minutes-private-in-public-rules/4504/40 +/// mod version_unchecked { +/// use super::*; +/// pub struct MigrateV5ToV6(sp_std::marker::PhantomData); +/// impl OnRuntimeUpgrade for VersionUncheckedMigrateV5ToV6 { +/// // OnRuntimeUpgrade implementation... +/// } /// } /// /// pub type MigrateV5ToV6 = @@ -73,7 +99,7 @@ pub struct VersionedMigration), @@ -91,7 +117,7 @@ impl< const FROM: u16, const TO: u16, Inner: crate::traits::OnRuntimeUpgrade, - Pallet: GetStorageVersion + PalletInfoAccess, + Pallet: GetStorageVersion + PalletInfoAccess, DbWeight: Get, > crate::traits::OnRuntimeUpgrade for VersionedMigration { @@ -100,7 +126,6 @@ impl< /// migration ran or not. #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { - use codec::Encode; let on_chain_version = Pallet::on_chain_storage_version(); if on_chain_version == FROM { Ok(VersionedPostUpgradeData::MigrationExecuted(Inner::pre_upgrade()?).encode()) @@ -163,25 +188,25 @@ impl< } } -/// Can store the current pallet version in storage. -pub trait StoreCurrentStorageVersion { - /// Write the current storage version to the storage. - fn store_current_storage_version(); +/// Can store the in-code pallet version on-chain. +pub trait StoreInCodeStorageVersion { + /// Write the in-code storage version on-chain. 
+ fn store_in_code_storage_version();
 }
-impl + PalletInfoAccess>
- StoreCurrentStorageVersion for StorageVersion
+impl + PalletInfoAccess>
+ StoreInCodeStorageVersion for StorageVersion
 {
- fn store_current_storage_version() {
- let version = ::current_storage_version();
+ fn store_in_code_storage_version() {
+ let version = ::in_code_storage_version();
 version.put::();
 }
 }
-impl + PalletInfoAccess>
- StoreCurrentStorageVersion for NoStorageVersionSet
+impl + PalletInfoAccess>
+ StoreInCodeStorageVersion for NoStorageVersionSet
 {
- fn store_current_storage_version() {
+ fn store_in_code_storage_version() {
 StorageVersion::default().put::();
 }
 }
@@ -193,7 +218,7 @@ pub trait PalletVersionToStorageVersionHelper {
 impl PalletVersionToStorageVersionHelper for T
 where
- T::CurrentStorageVersion: StoreCurrentStorageVersion,
+ T::InCodeStorageVersion: StoreInCodeStorageVersion,
 {
 fn migrate(db_weight: &RuntimeDbWeight) -> Weight {
 const PALLET_VERSION_STORAGE_KEY_POSTFIX: &[u8] = b":__PALLET_VERSION__:";
@@ -204,8 +229,7 @@ where
 sp_io::storage::clear(&pallet_version_key(::name()));
- >::store_current_storage_version(
- );
+ >::store_in_code_storage_version();
 db_weight.writes(2)
 }
@@ -226,7 +250,7 @@ impl PalletVersionToStorageVersionHelper for T {
 /// Migrate from the `PalletVersion` struct to the new [`StorageVersion`] struct.
 ///
-/// This will remove all `PalletVersion's` from the state and insert the current storage version.
+/// This will remove all `PalletVersion's` from the state and insert the in-code storage version.
 pub fn migrate_from_pallet_version_to_storage_version<
 Pallets: PalletVersionToStorageVersionHelper,
 >(
@@ -344,3 +368,622 @@ impl, DbWeight: Get> frame_support::traits
 Ok(())
 }
 }
+
+/// A migration that can proceed in multiple steps.
+pub trait SteppedMigration {
+ /// The cursor type that stores the progress (aka. state) of this migration.
+ type Cursor: codec::FullCodec + codec::MaxEncodedLen;
+
+ /// The unique identifier type of this migration.
+ type Identifier: codec::FullCodec + codec::MaxEncodedLen;
+
+ /// The unique identifier of this migration.
+ ///
+ /// If two migrations have the same identifier, then they are assumed to be identical.
+ fn id() -> Self::Identifier;
+
+ /// The maximum number of steps that this migration can take.
+ ///
+ /// This can be used to enforce progress and prevent migrations from becoming stuck forever. A
+ /// migration that exceeds its max steps is treated as failed. `None` means that there is no
+ /// limit.
+ fn max_steps() -> Option {
+ None
+ }
+
+ /// Try to migrate as much as possible with the given weight.
+ ///
+ /// **ANY STORAGE CHANGES MUST BE ROLLED-BACK BY THE CALLER UPON ERROR.** This is necessary
+ /// since the caller cannot return a cursor in the error case. [`Self::transactional_step`] is
+ /// provided as a convenience for the caller. A cursor of `None` implies that the migration is
+ /// at its end. A migration that once returned `None` is guaranteed to never be called again.
+ fn step(
+ cursor: Option,
+ meter: &mut WeightMeter,
+ ) -> Result, SteppedMigrationError>;
+
+ /// Same as [`Self::step`], but rolls back pending changes in the error case.
+ fn transactional_step(
+ mut cursor: Option,
+ meter: &mut WeightMeter,
+ ) -> Result, SteppedMigrationError> {
+ with_transaction_opaque_err(move || match Self::step(cursor, meter) {
+ Ok(new_cursor) => {
+ cursor = new_cursor;
+ sp_runtime::TransactionOutcome::Commit(Ok(cursor))
+ },
+ Err(err) => sp_runtime::TransactionOutcome::Rollback(Err(err)),
+ })
+ .map_err(|()| SteppedMigrationError::Failed)?
+ }
+}
+
+/// Error that can occur during a [`SteppedMigration`].
+#[derive(Debug, Encode, Decode, MaxEncodedLen, scale_info::TypeInfo)]
+pub enum SteppedMigrationError {
+ // Transient errors:
+ /// The remaining weight is not enough to do anything.
+ ///
+ /// Can be resolved by calling with at least `required` weight. Note that calling it with
+ /// exactly `required` weight could cause it to not make any progress.
+ InsufficientWeight {
+ /// Amount of weight required to make progress.
+ required: Weight,
+ },
+ // Permanent errors:
+ /// The migration cannot decode its cursor and can therefore not proceed.
+ ///
+ /// This should not happen unless (1) the migration itself returned an invalid cursor in a
+ /// previous iteration, (2) the storage got corrupted or (3) there is a bug in the caller's
+ /// code.
+ InvalidCursor,
+ /// The migration encountered a permanent error and cannot continue.
+ Failed,
+}
+
+/// Notification handler for status updates regarding Multi-Block-Migrations.
+#[impl_trait_for_tuples::impl_for_tuples(8)]
+pub trait MigrationStatusHandler {
+ /// Notifies of the start of a runtime migration.
+ fn started() {}
+
+ /// Notifies of the completion of a runtime migration.
+ fn completed() {}
+}
+
+/// Handles a failed runtime migration.
+///
+/// This should never happen, but is here for completeness.
+pub trait FailedMigrationHandler {
+ /// Infallibly handle a failed runtime migration.
+ ///
+ /// Gets passed in the optional index of the migration in the batch that caused the failure.
+ /// Returning `None` means that no automatic handling should take place and the callee decides
+ /// in the implementation what to do.
+ fn failed(migration: Option) -> FailedMigrationHandling;
+}
+
+/// Do not allow any transactions to be processed after a runtime upgrade failed.
+///
+/// This is **not a sane default**, since it prevents governance intervention.
+pub struct FreezeChainOnFailedMigration;
+
+impl FailedMigrationHandler for FreezeChainOnFailedMigration {
+ fn failed(_migration: Option) -> FailedMigrationHandling {
+ FailedMigrationHandling::KeepStuck
+ }
+}
+
+/// Enter safe mode on a failed runtime upgrade.
+///
+/// This can be very useful to manually intervene and fix the chain state. `Else` is used in case
+/// the safe mode could not be entered.
+pub struct EnterSafeModeOnFailedMigration(
+ PhantomData<(SM, Else)>,
+);
+
+impl FailedMigrationHandler
+ for EnterSafeModeOnFailedMigration
+where
+ ::BlockNumber: Bounded,
+{
+ fn failed(migration: Option) -> FailedMigrationHandling {
+ let entered = if SM::is_entered() {
+ SM::extend(Bounded::max_value())
+ } else {
+ SM::enter(Bounded::max_value())
+ };
+
+ // If we could not enter or extend safe mode (for whatever reason), then we try the next.
+ if entered.is_err() {
+ Else::failed(migration)
+ } else {
+ FailedMigrationHandling::KeepStuck
+ }
+ }
+}
+
+/// How to proceed after a runtime upgrade failed.
+///
+/// There is NO SANE DEFAULT HERE. All options are very dangerous and should be used with care.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum FailedMigrationHandling { + /// Resume extrinsic processing of the chain. This will not resume the upgrade. + /// + /// This should be supplemented with additional measures to ensure that the broken chain state + /// does not get further messed up by user extrinsics. + ForceUnstuck, + /// Set the cursor to `Stuck` and keep blocking extrinsics. + KeepStuck, + /// Don't do anything with the cursor and let the handler decide. + /// + /// This can be useful in cases where the other two options would overwrite any changes that + /// were done by the handler to the cursor. + Ignore, +} + +/// Something that can do multi step migrations. +pub trait MultiStepMigrator { + /// Hint for whether [`Self::step`] should be called. + fn ongoing() -> bool; + + /// Do the next step in the MBM process. + /// + /// Must gracefully handle the case that it is currently not upgrading. + fn step() -> Weight; +} + +impl MultiStepMigrator for () { + fn ongoing() -> bool { + false + } + + fn step() -> Weight { + Weight::zero() + } +} + +/// Multiple [`SteppedMigration`]. +pub trait SteppedMigrations { + /// The number of migrations that `Self` aggregates. + fn len() -> u32; + + /// The `n`th [`SteppedMigration::id`]. + /// + /// Is guaranteed to return `Some` if `n < Self::len()`. + fn nth_id(n: u32) -> Option>; + + /// The [`SteppedMigration::max_steps`] of the `n`th migration. + /// + /// Is guaranteed to return `Some` if `n < Self::len()`. + fn nth_max_steps(n: u32) -> Option>; + + /// Do a [`SteppedMigration::step`] on the `n`th migration. + /// + /// Is guaranteed to return `Some` if `n < Self::len()`. + fn nth_step( + n: u32, + cursor: Option>, + meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>>; + + /// Do a [`SteppedMigration::transactional_step`] on the `n`th migration. + /// + /// Is guaranteed to return `Some` if `n < Self::len()`. + fn nth_transactional_step( + n: u32, + cursor: Option>, + meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>>; + + /// The maximal encoded length across all cursors. + fn cursor_max_encoded_len() -> usize; + + /// The maximal encoded length across all identifiers. + fn identifier_max_encoded_len() -> usize; + + /// Assert the integrity of the migrations. + /// + /// Should be executed as part of a test prior to runtime usage. May or may not need + /// externalities. + #[cfg(feature = "std")] + fn integrity_test() -> Result<(), &'static str> { + use crate::ensure; + let l = Self::len(); + + for n in 0..l { + ensure!(Self::nth_id(n).is_some(), "id is None"); + ensure!(Self::nth_max_steps(n).is_some(), "steps is None"); + + // The cursor that we use does not matter. Hence use empty. 
+ ensure!( + Self::nth_step(n, Some(vec![]), &mut WeightMeter::new()).is_some(), + "steps is None" + ); + ensure!( + Self::nth_transactional_step(n, Some(vec![]), &mut WeightMeter::new()).is_some(), + "steps is None" + ); + } + + Ok(()) + } +} + +impl SteppedMigrations for () { + fn len() -> u32 { + 0 + } + + fn nth_id(_n: u32) -> Option> { + None + } + + fn nth_max_steps(_n: u32) -> Option> { + None + } + + fn nth_step( + _n: u32, + _cursor: Option>, + _meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>> { + None + } + + fn nth_transactional_step( + _n: u32, + _cursor: Option>, + _meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>> { + None + } + + fn cursor_max_encoded_len() -> usize { + 0 + } + + fn identifier_max_encoded_len() -> usize { + 0 + } +} + +// A collection consisting of only a single migration. +impl SteppedMigrations for T { + fn len() -> u32 { + 1 + } + + fn nth_id(_n: u32) -> Option> { + Some(T::id().encode()) + } + + fn nth_max_steps(n: u32) -> Option> { + // It should be generally fine to call with n>0, but the code should not attempt to. + n.is_zero() + .then_some(T::max_steps()) + .defensive_proof("nth_max_steps should only be called with n==0") + } + + fn nth_step( + _n: u32, + cursor: Option>, + meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>> { + if !_n.is_zero() { + defensive!("nth_step should only be called with n==0"); + return None + } + + let cursor = match cursor { + Some(cursor) => match T::Cursor::decode(&mut &cursor[..]) { + Ok(cursor) => Some(cursor), + Err(_) => return Some(Err(SteppedMigrationError::InvalidCursor)), + }, + None => None, + }; + + Some(T::step(cursor, meter).map(|cursor| cursor.map(|cursor| cursor.encode()))) + } + + fn nth_transactional_step( + n: u32, + cursor: Option>, + meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>> { + if n != 0 { + defensive!("nth_transactional_step should only be called with n==0"); + return None + } + + let cursor = match cursor { + Some(cursor) => match T::Cursor::decode(&mut &cursor[..]) { + Ok(cursor) => Some(cursor), + Err(_) => return Some(Err(SteppedMigrationError::InvalidCursor)), + }, + None => None, + }; + + Some( + T::transactional_step(cursor, meter).map(|cursor| cursor.map(|cursor| cursor.encode())), + ) + } + + fn cursor_max_encoded_len() -> usize { + T::Cursor::max_encoded_len() + } + + fn identifier_max_encoded_len() -> usize { + T::Identifier::max_encoded_len() + } +} + +#[impl_trait_for_tuples::impl_for_tuples(1, 30)] +impl SteppedMigrations for Tuple { + fn len() -> u32 { + for_tuples!( #( Tuple::len() )+* ) + } + + fn nth_id(n: u32) -> Option> { + let mut i = 0; + + for_tuples!( #( + if (i + Tuple::len()) > n { + return Tuple::nth_id(n - i) + } + + i += Tuple::len(); + )* ); + + None + } + + fn nth_step( + n: u32, + cursor: Option>, + meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>> { + let mut i = 0; + + for_tuples!( #( + if (i + Tuple::len()) > n { + return Tuple::nth_step(n - i, cursor, meter) + } + + i += Tuple::len(); + )* ); + + None + } + + fn nth_transactional_step( + n: u32, + cursor: Option>, + meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>> { + let mut i = 0; + + for_tuples! 
( #( + if (i + Tuple::len()) > n { + return Tuple::nth_transactional_step(n - i, cursor, meter) + } + + i += Tuple::len(); + )* ); + + None + } + + fn nth_max_steps(n: u32) -> Option> { + let mut i = 0; + + for_tuples!( #( + if (i + Tuple::len()) > n { + return Tuple::nth_max_steps(n - i) + } + + i += Tuple::len(); + )* ); + + None + } + + fn cursor_max_encoded_len() -> usize { + let mut max_len = 0; + + for_tuples!( #( + max_len = max_len.max(Tuple::cursor_max_encoded_len()); + )* ); + + max_len + } + + fn identifier_max_encoded_len() -> usize { + let mut max_len = 0; + + for_tuples!( #( + max_len = max_len.max(Tuple::identifier_max_encoded_len()); + )* ); + + max_len + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{assert_ok, storage::unhashed}; + + #[derive(Decode, Encode, MaxEncodedLen, Eq, PartialEq)] + pub enum Either { + Left(L), + Right(R), + } + + pub struct M0; + impl SteppedMigration for M0 { + type Cursor = (); + type Identifier = u8; + + fn id() -> Self::Identifier { + 0 + } + + fn step( + _cursor: Option, + _meter: &mut WeightMeter, + ) -> Result, SteppedMigrationError> { + log::info!("M0"); + unhashed::put(&[0], &()); + Ok(None) + } + } + + pub struct M1; + impl SteppedMigration for M1 { + type Cursor = (); + type Identifier = u8; + + fn id() -> Self::Identifier { + 1 + } + + fn step( + _cursor: Option, + _meter: &mut WeightMeter, + ) -> Result, SteppedMigrationError> { + log::info!("M1"); + unhashed::put(&[1], &()); + Ok(None) + } + + fn max_steps() -> Option { + Some(1) + } + } + + pub struct M2; + impl SteppedMigration for M2 { + type Cursor = (); + type Identifier = u8; + + fn id() -> Self::Identifier { + 2 + } + + fn step( + _cursor: Option, + _meter: &mut WeightMeter, + ) -> Result, SteppedMigrationError> { + log::info!("M2"); + unhashed::put(&[2], &()); + Ok(None) + } + + fn max_steps() -> Option { + Some(2) + } + } + + pub struct F0; + impl SteppedMigration for F0 { + type Cursor = (); + type Identifier = u8; + + fn id() -> Self::Identifier { + 3 + } + + fn step( + _cursor: Option, + _meter: &mut WeightMeter, + ) -> Result, SteppedMigrationError> { + log::info!("F0"); + unhashed::put(&[3], &()); + Err(SteppedMigrationError::Failed) + } + } + + // Three migrations combined to execute in order: + type Triple = (M0, (M1, M2)); + // Six migrations, just concatenating the ones from before: + type Hextuple = (Triple, Triple); + + #[test] + fn singular_migrations_work() { + assert_eq!(M0::max_steps(), None); + assert_eq!(M1::max_steps(), Some(1)); + assert_eq!(M2::max_steps(), Some(2)); + + assert_eq!(<(M0, M1)>::nth_max_steps(0), Some(None)); + assert_eq!(<(M0, M1)>::nth_max_steps(1), Some(Some(1))); + assert_eq!(<(M0, M1, M2)>::nth_max_steps(2), Some(Some(2))); + + assert_eq!(<(M0, M1)>::nth_max_steps(2), None); + } + + #[test] + fn tuple_migrations_work() { + assert_eq!(<() as SteppedMigrations>::len(), 0); + assert_eq!(<((), ((), ())) as SteppedMigrations>::len(), 0); + assert_eq!(::len(), 3); + assert_eq!(::len(), 6); + + // Check the IDs. The index specific functions all return an Option, + // to account for the out-of-range case. 
+ assert_eq!(::nth_id(0), Some(0u8.encode())); + assert_eq!(::nth_id(1), Some(1u8.encode())); + assert_eq!(::nth_id(2), Some(2u8.encode())); + + sp_io::TestExternalities::default().execute_with(|| { + for n in 0..3 { + ::nth_step( + n, + Default::default(), + &mut WeightMeter::new(), + ); + } + }); + } + + #[test] + fn integrity_test_works() { + sp_io::TestExternalities::default().execute_with(|| { + assert_ok!(<() as SteppedMigrations>::integrity_test()); + assert_ok!(::integrity_test()); + assert_ok!(::integrity_test()); + assert_ok!(::integrity_test()); + assert_ok!(::integrity_test()); + assert_ok!(::integrity_test()); + }); + } + + #[test] + fn transactional_rollback_works() { + sp_io::TestExternalities::default().execute_with(|| { + assert_ok!(<(M0, F0) as SteppedMigrations>::nth_transactional_step( + 0, + Default::default(), + &mut WeightMeter::new() + ) + .unwrap()); + assert!(unhashed::exists(&[0])); + + let _g = crate::StorageNoopGuard::new(); + assert!(<(M0, F0) as SteppedMigrations>::nth_transactional_step( + 1, + Default::default(), + &mut WeightMeter::new() + ) + .unwrap() + .is_err()); + assert!(<(F0, M1) as SteppedMigrations>::nth_transactional_step( + 0, + Default::default(), + &mut WeightMeter::new() + ) + .unwrap() + .is_err()); + }); + } +} diff --git a/substrate/frame/support/src/storage/transactional.rs b/substrate/frame/support/src/storage/transactional.rs index d42e1809e91292a8e0ad367af4e203b8b1b17f87..0671db4a3a86bc76f7706df6eefe28a8c34c0701 100644 --- a/substrate/frame/support/src/storage/transactional.rs +++ b/substrate/frame/support/src/storage/transactional.rs @@ -127,6 +127,22 @@ where } } +/// Same as [`with_transaction`] but casts any internal error to `()`. +/// +/// This rids `E` of the `From` bound that is required by `with_transaction`. +pub fn with_transaction_opaque_err(f: F) -> Result, ()> +where + F: FnOnce() -> TransactionOutcome>, +{ + with_transaction(move || -> TransactionOutcome, DispatchError>> { + match f() { + TransactionOutcome::Commit(res) => TransactionOutcome::Commit(Ok(res)), + TransactionOutcome::Rollback(res) => TransactionOutcome::Rollback(Ok(res)), + } + }) + .map_err(|_| ()) +} + /// Same as [`with_transaction`] but without a limit check on nested transactional layers. /// /// This is mostly for backwards compatibility before there was a transactional layer limit. 
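For illustration, the `SteppedMigration` API introduced in `migrations.rs` above might be implemented along these lines. This is only a sketch: the migration, its item count, the per-item weight, and all names are assumptions rather than code from this changeset. It also shows the contract the default `transactional_step` (which wraps `step` via the `with_transaction_opaque_err` helper added just above) relies on: report `InsufficientWeight` when nothing fits, return `Some(cursor)` to continue later, and `None` when done.

```rust
// Sketch only: a stepped migration that pretends to process 1024 items, a few per
// block, keeping the number of unprocessed items in its cursor. All names, counts
// and weights are illustrative assumptions.
use frame_support::{
	migrations::{SteppedMigration, SteppedMigrationError},
	weights::{Weight, WeightMeter},
};

pub struct ExampleLazyMigration;

impl SteppedMigration for ExampleLazyMigration {
	// Number of items still to process; returning `None` from `step` means "done".
	type Cursor = u32;
	type Identifier = [u8; 8];

	fn id() -> Self::Identifier {
		*b"example!"
	}

	fn max_steps() -> Option<u32> {
		// Treat the migration as failed if it has not finished after 1024 steps.
		Some(1024)
	}

	fn step(
		cursor: Option<Self::Cursor>,
		meter: &mut WeightMeter,
	) -> Result<Option<Self::Cursor>, SteppedMigrationError> {
		// Assumed cost of processing a single item.
		let per_item = Weight::from_parts(10_000, 0);
		let mut remaining = cursor.unwrap_or(1024);

		// If not even one item fits into the remaining weight, ask the caller to
		// retry with more weight; this is the transient error case.
		if !meter.can_consume(per_item) {
			return Err(SteppedMigrationError::InsufficientWeight { required: per_item })
		}

		// Process items until the meter is exhausted or everything is done.
		while remaining > 0 && meter.try_consume(per_item).is_ok() {
			// ... do the actual per-item work here ...
			remaining -= 1;
		}

		// `Some(..)` stores the progress for the next block; `None` signals completion.
		Ok(if remaining == 0 { None } else { Some(remaining) })
	}
}
```

A runtime would then aggregate such migrations, e.g. as a tuple, since tuples implement the `SteppedMigrations` collection trait exercised by the tests above.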
diff --git a/substrate/frame/support/src/traits.rs b/substrate/frame/support/src/traits.rs index 3d0429f71b11d6c74f87ef33521634f46b0d591c..1997d8fc223efe7ff1d1be17e2bc10fb973d2cb9 100644 --- a/substrate/frame/support/src/traits.rs +++ b/substrate/frame/support/src/traits.rs @@ -59,10 +59,10 @@ pub use misc::{ AccountTouch, Backing, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, ConstU16, ConstU32, ConstU64, ConstU8, DefensiveMax, DefensiveMin, DefensiveSaturating, DefensiveTruncateFrom, EnsureInherentsAreFirst, EqualPrivilegeOnly, EstimateCallFee, - ExecuteBlock, ExtrinsicCall, Get, GetBacking, GetDefault, HandleLifetime, IsSubType, IsType, - Len, OffchainWorker, OnKilledAccount, OnNewAccount, PrivilegeCmp, SameOrOther, Time, - TryCollect, TryDrop, TypedGet, UnixTime, VariantCount, VariantCountOf, WrapperKeepOpaque, - WrapperOpaque, + ExecuteBlock, ExtrinsicCall, Get, GetBacking, GetDefault, HandleLifetime, IsInherent, + IsSubType, IsType, Len, OffchainWorker, OnKilledAccount, OnNewAccount, PrivilegeCmp, + SameOrOther, Time, TryCollect, TryDrop, TypedGet, UnixTime, VariantCount, VariantCountOf, + WrapperKeepOpaque, WrapperOpaque, }; #[allow(deprecated)] pub use misc::{PreimageProvider, PreimageRecipient}; @@ -86,7 +86,8 @@ mod hooks; pub use hooks::GenesisBuild; pub use hooks::{ BeforeAllRuntimeMigrations, BuildGenesisConfig, Hooks, IntegrityTest, OnFinalize, OnGenesis, - OnIdle, OnInitialize, OnRuntimeUpgrade, OnTimestampSet, + OnIdle, OnInitialize, OnPoll, OnRuntimeUpgrade, OnTimestampSet, PostInherents, + PostTransactions, PreInherents, }; pub mod schedule; diff --git a/substrate/frame/support/src/traits/dispatch.rs b/substrate/frame/support/src/traits/dispatch.rs index de50ce7a26c211190ddcb81976d39b9779540756..212c3080352d7a4350a2638ae9204cc0020e4e22 100644 --- a/substrate/frame/support/src/traits/dispatch.rs +++ b/substrate/frame/support/src/traits/dispatch.rs @@ -482,7 +482,7 @@ pub trait OriginTrait: Sized { type Call; /// The caller origin, overarching type of all pallets origins. - type PalletsOrigin: Into + CallerTrait + MaxEncodedLen; + type PalletsOrigin: Send + Sync + Into + CallerTrait + MaxEncodedLen; /// The AccountId used across the system. type AccountId; @@ -496,6 +496,14 @@ pub trait OriginTrait: Sized { /// Replace the caller with caller from the other origin fn set_caller_from(&mut self, other: impl Into); + /// Replace the caller with caller from the other origin + fn set_caller(&mut self, caller: Self::PalletsOrigin); + + /// Replace the caller with caller from the other origin + fn set_caller_from_signed(&mut self, caller_account: Self::AccountId) { + self.set_caller(Self::PalletsOrigin::from(RawOrigin::Signed(caller_account))) + } + /// Filter the call if caller is not root, if false is returned then the call must be filtered /// out. /// @@ -544,6 +552,17 @@ pub trait OriginTrait: Sized { fn as_system_ref(&self) -> Option<&RawOrigin> { self.caller().as_system_ref() } + + /// Extract a reference to the sytsem signer, if that's what the caller is. 
+ fn as_system_signer(&self) -> Option<&Self::AccountId> {
+ self.caller().as_system_ref().and_then(|s| {
+ if let RawOrigin::Signed(ref who) = s {
+ Some(who)
+ } else {
+ None
+ }
+ })
+ }
 }
 
 #[cfg(test)]
diff --git a/substrate/frame/support/src/traits/hooks.rs b/substrate/frame/support/src/traits/hooks.rs
index 20788ce932d8b397990767c6ae8a0d82016cccbd..7d0e5aa1e89ca1d76e81d526881041c09c6e6cf3 100644
--- a/substrate/frame/support/src/traits/hooks.rs
+++ b/substrate/frame/support/src/traits/hooks.rs
@@ -25,10 +25,74 @@ use crate::weights::Weight;
 use impl_trait_for_tuples::impl_for_tuples;
 use sp_runtime::traits::AtLeast32BitUnsigned;
 use sp_std::prelude::*;
+use sp_weights::WeightMeter;
 #[cfg(feature = "try-runtime")]
 use sp_runtime::TryRuntimeError;
+/// Provides a callback to execute logic before all inherents.
+pub trait PreInherents {
+ /// Called before all inherents were applied but after `on_initialize`.
+ fn pre_inherents() {}
+}
+
+#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))]
+#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))]
+#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))]
+impl PreInherents for Tuple {
+ fn pre_inherents() {
+ for_tuples!( #( Tuple::pre_inherents(); )* );
+ }
+}
+
+/// Provides a callback to execute logic after all inherents.
+pub trait PostInherents {
+ /// Called after all inherents were applied.
+ fn post_inherents() {}
+}
+
+#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))]
+#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))]
+#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))]
+impl PostInherents for Tuple {
+ fn post_inherents() {
+ for_tuples!( #( Tuple::post_inherents(); )* );
+ }
+}
+
+/// Provides a callback to execute logic after all transactions.
+pub trait PostTransactions {
+ /// Called after all transactions were applied but before `on_finalize`.
+ fn post_transactions() {}
+}
+
+#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))]
+#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))]
+#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))]
+impl PostTransactions for Tuple {
+ fn post_transactions() {
+ for_tuples!( #( Tuple::post_transactions(); )* );
+ }
+}
+
+/// Periodically executes logic. Is not guaranteed to run within a specific timeframe and should
+/// only be used on logic that has no deadline.
+pub trait OnPoll {
+ /// Code to execute every now and then at the beginning of the block after inherent application.
+ ///
+ /// The remaining weight limit must be respected.
+ fn on_poll(_n: BlockNumber, _weight: &mut WeightMeter) {}
+}
+
+#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))]
+#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))]
+#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))]
+impl OnPoll for Tuple {
+ fn on_poll(n: BlockNumber, weight: &mut WeightMeter) {
+ for_tuples!( #( Tuple::on_poll(n.clone(), weight); )* );
+ }
+}
+
 /// See [`Hooks::on_initialize`].
 pub trait OnInitialize {
 /// See [`Hooks::on_initialize`].
@@ -90,7 +154,7 @@ impl OnIdle for Tuple {
 ///
 /// Implementing this trait for a pallet let's you express operations that should
 /// happen at genesis.
It will be called in an externalities provided environment and
-/// will see the genesis state after all pallets have written their genesis state.
+/// will set the genesis state after all pallets have written their genesis state.
 #[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))]
 #[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))]
 #[cfg_attr(feature = "tuples-128", impl_for_tuples(128))]
@@ -306,19 +370,23 @@ pub trait IntegrityTest {
 /// end
 /// ```
 ///
-/// * `OnRuntimeUpgrade` is only executed before everything else if a code
-/// * `OnRuntimeUpgrade` is mandatorily at the beginning of the block body (extrinsics) being
-/// processed. change is detected.
-/// * Extrinsics start with inherents, and continue with other signed or unsigned extrinsics.
-/// * `OnIdle` optionally comes after extrinsics.
-/// `OnFinalize` mandatorily comes after `OnIdle`.
+/// * [`OnRuntimeUpgrade`](Hooks::OnRuntimeUpgrade) hooks are only executed when a code change is
+/// detected.
+/// * [`OnRuntimeUpgrade`](Hooks::OnRuntimeUpgrade) hooks are mandatorily executed at the very
+/// beginning of the block body, before any extrinsics are processed.
+/// * [`Inherents`](sp_inherents) are always executed before any other signed or unsigned
+/// extrinsics.
+/// * [`OnIdle`](Hooks::OnIdle) hooks are executed after extrinsics if there is weight remaining in
+/// the block.
+/// * [`OnFinalize`](Hooks::OnFinalize) hooks are mandatorily executed after
+/// [`OnIdle`](Hooks::OnIdle).
 ///
-/// > `OffchainWorker` is not part of this flow, as it is not really part of the consensus/main
-/// > block import path, and is called optionally, and in other circumstances. See
-/// > [`crate::traits::misc::OffchainWorker`] for more information.
+/// > [`OffchainWorker`](crate::traits::misc::OffchainWorker) hooks are not part of this flow,
+/// > because they are not part of the consensus/main block building logic. See
+/// > [`OffchainWorker`](crate::traits::misc::OffchainWorker) for more information.
 ///
-/// To learn more about the execution of hooks see `frame-executive` as this component is is charge
-/// of dispatching extrinsics and placing the hooks in the correct order.
+/// To learn more about the execution of hooks see the FRAME `Executive` pallet which is in charge
+/// of dispatching extrinsics and calling hooks in the correct order.
 pub trait Hooks {
 /// Block initialization hook. This is called at the very beginning of block execution.
 ///
@@ -370,30 +438,44 @@ pub trait Hooks {
 Weight::zero()
 }
- /// Hook executed when a code change (aka. a "runtime upgrade") is detected by FRAME.
+ /// A hook to run logic after inherent application.
+ ///
+ /// Is not guaranteed to execute in a block and should therefore only be used in no-deadline
+ /// scenarios.
+ fn on_poll(_n: BlockNumber, _weight: &mut WeightMeter) {}
+
+ /// Hook executed when a code change (aka. a "runtime upgrade") is detected by the FRAME
+ /// `Executive` pallet.
 ///
 /// Be aware that this is called before [`Hooks::on_initialize`] of any pallet; therefore, a lot
 /// of the critical storage items such as `block_number` in system pallet might have not been
- /// set.
+ /// set yet.
 ///
- /// Vert similar to [`Hooks::on_initialize`], any code in this block is mandatory and MUST
- /// execute. Use with care.
+ /// Similar to [`Hooks::on_initialize`], any code in this block is mandatory and MUST execute.
+ /// It is strongly recommended to dry-run the execution of these hooks using
+ /// [try-runtime-cli](https://github.com/paritytech/try-runtime-cli) to ensure they will not
+ /// produce an overweight block which can brick your chain. Use with care!
 ///
- /// ## Implementation Note: Versioning
+ /// ## Implementation Note: Standalone Migrations
 ///
- /// 1. An implementation of this should typically follow a pattern where the version of the
- /// pallet is checked against the onchain version, and a decision is made about what needs to be
- /// done. This is helpful to prevent accidental repetitive execution of this hook, which can be
- /// catastrophic.
+ /// Additional migrations can be created by directly implementing [`OnRuntimeUpgrade`] on
+ /// structs and passing them to `Executive`.
 ///
- /// Alternatively, [`frame_support::migrations::VersionedMigration`] can be used to assist with
- /// this.
+ /// ## Implementation Note: Pallet Versioning
 ///
- /// ## Implementation Note: Runtime Level Migration
+ /// Implementations of this hook are typically wrapped in
+ /// [`crate::migrations::VersionedMigration`] to ensure the migration is executed exactly
+ /// once and only when it is supposed to.
 ///
- /// Additional "upgrade hooks" can be created by pallets by a manual implementation of
- /// [`Hooks::on_runtime_upgrade`] which can be passed on to `Executive` at the top level
- /// runtime.
+ /// Alternatively, developers can manually implement version checks.
+ ///
+ /// Failure to adequately check storage versions can result in accidental repetitive execution
+ /// of the hook, which can be catastrophic.
+ ///
+ /// ## Implementation Note: Weight
+ ///
+ /// Typically, implementations of this method are simple enough that weights can be calculated
+ /// manually. However, if required, a benchmark can also be used.
 fn on_runtime_upgrade() -> Weight {
 Weight::zero()
 }
@@ -403,7 +485,7 @@ pub trait Hooks {
 /// It should focus on certain checks to ensure that the state is sensible. This is never
 /// executed in a consensus code-path, therefore it can consume as much weight as it needs.
 ///
- /// This hook should not alter any storage.
+ /// This hook must not alter any storage.
 #[cfg(feature = "try-runtime")]
 fn try_state(_n: BlockNumber) -> Result<(), TryRuntimeError> {
 Ok(())
 }
@@ -415,7 +497,7 @@ pub trait Hooks {
 /// which will be passed to `post_upgrade` after upgrading for post-check. An empty vector
 /// should be returned if there is no such need.
 ///
- /// This hook is never meant to be executed on-chain but is meant to be used by testing tools.
+ /// This hook is never executed on-chain but instead used by testing tools.
 #[cfg(feature = "try-runtime")]
 fn pre_upgrade() -> Result, TryRuntimeError> {
 Ok(Vec::new())
 }
@@ -434,7 +516,7 @@ pub trait Hooks {
 }
 /// Implementing this function on a pallet allows you to perform long-running tasks that are
- /// dispatched as separate threads, and entirely independent of the main wasm runtime.
+ /// dispatched as separate threads, and entirely independent of the main blockchain execution.
 ///
 /// This function can freely read from the state, but any change it makes to the state is
 /// meaningless.
Writes can be pushed back to the chain by submitting extrinsics from the diff --git a/substrate/frame/support/src/traits/messages.rs b/substrate/frame/support/src/traits/messages.rs index 995ac4f717911195e4dba202d350c6cbc09ae340..f3d893bcc1d899fce06ef00a38d687bfb3ea0181 100644 --- a/substrate/frame/support/src/traits/messages.rs +++ b/substrate/frame/support/src/traits/messages.rs @@ -123,6 +123,8 @@ impl ServiceQueues for NoopServiceQueues { pub struct QueueFootprint { /// The number of pages in the queue (including overweight pages). pub pages: u32, + /// The number of pages that are ready (not yet processed and also not overweight). + pub ready_pages: u32, /// The storage footprint of the queue (including overweight messages). pub storage: Footprint, } diff --git a/substrate/frame/support/src/traits/metadata.rs b/substrate/frame/support/src/traits/metadata.rs index 586af20511a89f82caf33297177a12f6d0988bf4..8bda4186bc967b29a6342ea96dd0a1cdc5072438 100644 --- a/substrate/frame/support/src/traits/metadata.rs +++ b/substrate/frame/support/src/traits/metadata.rs @@ -261,41 +261,64 @@ impl Add for StorageVersion { } } -/// Special marker struct if no storage version is set for a pallet. +/// Special marker struct used when [`storage_version`](crate::pallet_macros::storage_version) is +/// not defined for a pallet. /// /// If you (the reader) end up here, it probably means that you tried to compare /// [`GetStorageVersion::on_chain_storage_version`] against -/// [`GetStorageVersion::current_storage_version`]. This basically means that the -/// [`storage_version`](crate::pallet_macros::storage_version) is missing in the pallet where the -/// mentioned functions are being called. +/// [`GetStorageVersion::in_code_storage_version`]. This basically means that the +/// [`storage_version`](crate::pallet_macros::storage_version) is missing from the pallet where the +/// mentioned functions are being called, and needs to be defined. #[derive(Debug, Default)] pub struct NoStorageVersionSet; -/// Provides information about the storage version of a pallet. +/// Provides information about a pallet's storage versions. /// -/// It differentiates between current and on-chain storage version. Both should be only out of sync -/// when a new runtime upgrade was applied and the runtime migrations did not yet executed. -/// Otherwise it means that the pallet works with an unsupported storage version and unforeseen -/// stuff can happen. +/// Every pallet has two storage versions: +/// 1. An in-code storage version +/// 2. An on-chain storage version /// -/// The current storage version is the version of the pallet as supported at runtime. The active -/// storage version is the version of the pallet in the storage. +/// The in-code storage version is the version of the pallet as defined in the runtime blob, and the +/// on-chain storage version is the version of the pallet stored on-chain. /// -/// It is required to update the on-chain storage version manually when a migration was applied. +/// Storage versions should be only ever be out of sync when a pallet has been updated to a new +/// version and the in-code version is incremented, but the migration has not yet been executed +/// on-chain as part of a runtime upgrade. +/// +/// It is the responsibility of the developer to ensure that the on-chain storage version is set +/// correctly during a migration so that it matches the in-code storage version. pub trait GetStorageVersion { - /// This will be filled out by the [`pallet`](crate::pallet) macro. 
+ /// This type is generated by the [`pallet`](crate::pallet) macro.
+ ///
+ /// If the [`storage_version`](crate::pallet_macros::storage_version) attribute isn't specified,
+ /// this is set to [`NoStorageVersionSet`] to signify that it is missing.
+ ///
+ /// If the [`storage_version`](crate::pallet_macros::storage_version) attribute is specified,
+ /// this will be set to a [`StorageVersion`] corresponding to the attribute.
 ///
- /// If the [`storage_version`](crate::pallet_macros::storage_version) attribute isn't given
- /// this is set to [`NoStorageVersionSet`] to inform the user that the attribute is missing.
- /// This should prevent that the user forgets to set a storage version when required. However,
- /// this will only work when the user actually tries to call [`Self::current_storage_version`]
- /// to compare it against the [`Self::on_chain_storage_version`]. If the attribute is given,
- /// this will be set to [`StorageVersion`].
- type CurrentStorageVersion;
-
- /// Returns the current storage version as supported by the pallet.
- fn current_storage_version() -> Self::CurrentStorageVersion;
- /// Returns the on-chain storage version of the pallet as stored in the storage.
+ /// The intention of using [`NoStorageVersionSet`] instead of defaulting to a [`StorageVersion`]
+ /// of zero is to prevent developers from forgetting to set
+ /// [`storage_version`](crate::pallet_macros::storage_version) when it is required, like in the
+ /// case that they wish to compare the in-code storage version to the on-chain storage version.
+ type InCodeStorageVersion;
+
+ #[deprecated(
+ note = "This method has been renamed to `in_code_storage_version` and will be removed after March 2024."
+ )]
+ /// DEPRECATED: Use [`Self::in_code_storage_version`] instead.
+ ///
+ /// Returns the in-code storage version as specified in the
+ /// [`storage_version`](crate::pallet_macros::storage_version) attribute, or
+ /// [`NoStorageVersionSet`] if the attribute is missing.
+ fn current_storage_version() -> Self::InCodeStorageVersion {
+ Self::in_code_storage_version()
+ }
+
+ /// Returns the in-code storage version as specified in the
+ /// [`storage_version`](crate::pallet_macros::storage_version) attribute, or
+ /// [`NoStorageVersionSet`] if the attribute is missing.
+ fn in_code_storage_version() -> Self::InCodeStorageVersion;
+ /// Returns the storage version of the pallet as last set in the actual on-chain storage.
 fn on_chain_storage_version() -> StorageVersion;
 }
diff --git a/substrate/frame/support/src/traits/misc.rs b/substrate/frame/support/src/traits/misc.rs
index eafd9c8abdd2dfc4b4174723678d94aa963ad738..fba546ce21b5414aab43a406922bfd7197c12ca2 100644
--- a/substrate/frame/support/src/traits/misc.rs
+++ b/substrate/frame/support/src/traits/misc.rs
@@ -23,6 +23,7 @@ use impl_trait_for_tuples::impl_for_tuples;
 use scale_info::{build::Fields, meta_type, Path, Type, TypeInfo, TypeParameter};
 use sp_arithmetic::traits::{CheckedAdd, CheckedMul, CheckedSub, One, Saturating};
 use sp_core::bounded::bounded_vec::TruncateFrom;
+
 #[doc(hidden)]
 pub use sp_runtime::traits::{
 ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, ConstU16, ConstU32,
@@ -895,11 +896,21 @@ pub trait GetBacking {
 /// A trait to ensure the inherent are before non-inherent in a block.
 ///
 /// This is typically implemented on runtime, through `construct_runtime!`.
-pub trait EnsureInherentsAreFirst { +pub trait EnsureInherentsAreFirst: + IsInherent<::Extrinsic> +{ /// Ensure the position of inherent is correct, i.e. they are before non-inherents. /// - /// On error return the index of the inherent with invalid position (counting from 0). - fn ensure_inherents_are_first(block: &Block) -> Result<(), u32>; + /// On error return the index of the inherent with invalid position (counting from 0). On + /// success it returns the index of the last inherent. `0` therefore means that there are no + /// inherents. + fn ensure_inherents_are_first(block: &Block) -> Result; +} + +/// A trait to check if an extrinsic is an inherent. +pub trait IsInherent { + /// Whether this extrinsic is an inherent. + fn is_inherent(ext: &Extrinsic) -> bool; } /// An extrinsic on which we can get access to call. @@ -908,24 +919,13 @@ pub trait ExtrinsicCall: sp_runtime::traits::Extrinsic { fn call(&self) -> &Self::Call; } -#[cfg(feature = "std")] -impl ExtrinsicCall for sp_runtime::testing::TestXt -where - Call: codec::Codec + Sync + Send + TypeInfo, - Extra: TypeInfo, -{ - fn call(&self) -> &Self::Call { - &self.call - } -} - impl ExtrinsicCall for sp_runtime::generic::UncheckedExtrinsic where Address: TypeInfo, Call: TypeInfo, Signature: TypeInfo, - Extra: sp_runtime::traits::SignedExtension + TypeInfo, + Extra: TypeInfo, { fn call(&self) -> &Self::Call { &self.function diff --git a/substrate/frame/support/src/transaction_extensions.rs b/substrate/frame/support/src/transaction_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..98d78b9b4e4777e4e379975c63a2757f06b47553 --- /dev/null +++ b/substrate/frame/support/src/transaction_extensions.rs @@ -0,0 +1,89 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Transaction extensions. 
+ +use crate::{CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound}; +use codec::{Decode, Encode}; +use scale_info::TypeInfo; +use sp_io::hashing::blake2_256; +use sp_runtime::{ + impl_tx_ext_default, + traits::{ + transaction_extension::{TransactionExtensionBase, TransactionExtensionInterior}, + DispatchInfoOf, Dispatchable, IdentifyAccount, TransactionExtension, Verify, + }, + transaction_validity::{InvalidTransaction, TransactionValidityError, ValidTransaction}, +}; + +#[derive( + CloneNoBound, EqNoBound, PartialEqNoBound, Encode, Decode, RuntimeDebugNoBound, TypeInfo, +)] +#[codec(encode_bound())] +#[codec(decode_bound())] +pub struct VerifyMultiSignature +where + V: TransactionExtensionInterior, + ::AccountId: TransactionExtensionInterior, +{ + signature: V, + account: ::AccountId, +} + +impl TransactionExtensionBase for VerifyMultiSignature +where + V: TransactionExtensionInterior, + ::AccountId: TransactionExtensionInterior, +{ + const IDENTIFIER: &'static str = "VerifyMultiSignature"; + type Implicit = (); +} + +impl TransactionExtension + for VerifyMultiSignature +where + V: TransactionExtensionInterior, + ::AccountId: TransactionExtensionInterior, + ::RuntimeOrigin: From::AccountId>>, +{ + type Val = (); + type Pre = (); + impl_tx_ext_default!(Call; Context; prepare); + + fn validate( + &self, + _origin: ::RuntimeOrigin, + _call: &Call, + _info: &DispatchInfoOf, + _len: usize, + _: &mut Context, + _: (), + inherited_implication: &impl Encode, + ) -> Result< + (ValidTransaction, Self::Val, ::RuntimeOrigin), + TransactionValidityError, + > { + let msg = inherited_implication.using_encoded(blake2_256); + + if !self.signature.verify(&msg[..], &self.account) { + Err(InvalidTransaction::BadProof)? + } + // We clobber the original origin. Maybe we shuld check that it's none? + let origin = Some(self.account.clone()).into(); + Ok((ValidTransaction::default(), (), origin)) + } +} diff --git a/substrate/frame/support/src/weights/block_weights.rs b/substrate/frame/support/src/weights/block_weights.rs index 57a68554755abb007165e751df687d097cd48f17..647b619835757a380d37acd6c8003f758137ba93 100644 --- a/substrate/frame/support/src/weights/block_weights.rs +++ b/substrate/frame/support/src/weights/block_weights.rs @@ -15,24 +15,23 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16 (Y/M/D) -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01 (Y/M/D) +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! //! SHORT-NAME: `block`, LONG-NAME: `BlockExecution`, RUNTIME: `Development` //! WARMUPS: `10`, REPEAT: `100` -//! WEIGHT-PATH: `./frame/support/src/weights/` +//! WEIGHT-PATH: `./substrate/frame/support/src/weights/` //! WEIGHT-METRIC: `Average`, WEIGHT-MUL: `1.0`, WEIGHT-ADD: `0` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // overhead // --chain=dev -// --execution=wasm // --wasm-execution=compiled -// --weight-path=./frame/support/src/weights/ -// --header=./HEADER-APACHE2 +// --weight-path=./substrate/frame/support/src/weights/ +// --header=./substrate/HEADER-APACHE2 // --warmup=10 // --repeat=100 @@ -44,17 +43,17 @@ parameter_types! 
{ /// Calculated by multiplying the *Average* with `1.0` and adding `0`. /// /// Stats nanoseconds: - /// Min, Max: 376_949, 622_462 - /// Average: 390_584 - /// Median: 386_322 - /// Std-Dev: 24792.0 + /// Min, Max: 424_332, 493_017 + /// Average: 437_118 + /// Median: 434_920 + /// Std-Dev: 8798.01 /// /// Percentiles nanoseconds: - /// 99th: 433_299 - /// 95th: 402_688 - /// 75th: 391_645 + /// 99th: 460_074 + /// 95th: 451_580 + /// 75th: 440_307 pub const BlockExecutionWeight: Weight = - Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(390_584), 0); + Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(437_118), 0); } #[cfg(test)] diff --git a/substrate/frame/support/src/weights/extrinsic_weights.rs b/substrate/frame/support/src/weights/extrinsic_weights.rs index a304f089ff782c34ab9941705503c5e766bb08b0..99b392c4369a523ef2ce08bfdd9f9dfed86a9f94 100644 --- a/substrate/frame/support/src/weights/extrinsic_weights.rs +++ b/substrate/frame/support/src/weights/extrinsic_weights.rs @@ -15,24 +15,23 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16 (Y/M/D) -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01 (Y/M/D) +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! //! SHORT-NAME: `extrinsic`, LONG-NAME: `ExtrinsicBase`, RUNTIME: `Development` //! WARMUPS: `10`, REPEAT: `100` -//! WEIGHT-PATH: `./frame/support/src/weights/` +//! WEIGHT-PATH: `./substrate/frame/support/src/weights/` //! WEIGHT-METRIC: `Average`, WEIGHT-MUL: `1.0`, WEIGHT-ADD: `0` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // overhead // --chain=dev -// --execution=wasm // --wasm-execution=compiled -// --weight-path=./frame/support/src/weights/ -// --header=./HEADER-APACHE2 +// --weight-path=./substrate/frame/support/src/weights/ +// --header=./substrate/HEADER-APACHE2 // --warmup=10 // --repeat=100 @@ -44,17 +43,17 @@ parameter_types! { /// Calculated by multiplying the *Average* with `1.0` and adding `0`. 
/// /// Stats nanoseconds: - /// Min, Max: 123_875, 128_419 - /// Average: 124_414 - /// Median: 124_332 - /// Std-Dev: 497.74 + /// Min, Max: 106_053, 107_403 + /// Average: 106_446 + /// Median: 106_415 + /// Std-Dev: 216.17 /// /// Percentiles nanoseconds: - /// 99th: 125_245 - /// 95th: 124_989 - /// 75th: 124_498 + /// 99th: 107_042 + /// 95th: 106_841 + /// 75th: 106_544 pub const ExtrinsicBaseWeight: Weight = - Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(124_414), 0); + Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(106_446), 0); } #[cfg(test)] diff --git a/substrate/frame/support/test/tests/construct_runtime.rs b/substrate/frame/support/test/tests/construct_runtime.rs index b8341b25cb0985915706ccb68b870474118d33cb..83691451ccdf07833dd276441ea5cedf4066beaf 100644 --- a/substrate/frame/support/test/tests/construct_runtime.rs +++ b/substrate/frame/support/test/tests/construct_runtime.rs @@ -683,7 +683,7 @@ fn test_metadata() { name: "Version", ty: meta_type::(), value: RuntimeVersion::default().encode(), - docs: maybe_docs(vec![ " Get the chain's current version."]), + docs: maybe_docs(vec![ " Get the chain's in-code version."]), }, PalletConstantMetadata { name: "SS58Prefix", @@ -815,7 +815,7 @@ fn test_metadata() { ty: meta_type::(), version: 4, signed_extensions: vec![SignedExtensionMetadata { - identifier: "UnitSignedExtension", + identifier: "UnitTransactionExtension", ty: meta_type::<()>(), additional_signed: meta_type::<()>(), }], diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr index 61cfc07caedc37e1a8427e8fadc61406813c52e6..c677ddcd672bc048b96e0c7274c36ffccf31e5ef 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr @@ -347,7 +347,7 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied 26 | System: frame_system::{Pallet, Call, Storage, Config, Event}, | ^^^^^^ the trait `Config` is not implemented for `Runtime` | -note: required by a bound in `frame_system::GenesisConfig` +note: required by a bound in `GenesisConfig` --> $WORKSPACE/substrate/frame/system/src/lib.rs | | pub struct GenesisConfig { @@ -477,7 +477,7 @@ note: required because it appears within the type `RuntimeCall` | ||_- in this macro invocation ... | note: required by a bound in `frame_support::sp_runtime::traits::Dispatchable::Config` - --> $WORKSPACE/substrate/primitives/runtime/src/traits.rs + --> $WORKSPACE/substrate/primitives/runtime/src/traits/mod.rs | | type Config; | ^^^^^^^^^^^^ required by this bound in `Dispatchable::Config` @@ -510,7 +510,7 @@ note: required because it appears within the type `RuntimeCall` | ||_- in this macro invocation ... 
| note: required by a bound in `frame_support::pallet_prelude::ValidateUnsigned::Call` - --> $WORKSPACE/substrate/primitives/runtime/src/traits.rs + --> $WORKSPACE/substrate/primitives/runtime/src/traits/mod.rs | | type Call; | ^^^^^^^^^^ required by this bound in `ValidateUnsigned::Call` diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr index 3b6329c650fa65208e01c7d991e321f256b7eecf..6160f8234a350cc3ee0a0761cab0eed6ea022525 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr @@ -4,88 +4,24 @@ error: The number of pallets exceeds the maximum number of tuple elements. To in 67 | pub struct Runtime | ^^^ -error[E0412]: cannot find type `RuntimeCall` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:35:64 - | -35 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; - | ^^^^^^^^^^^ not found in this scope - | -help: you might be missing a type parameter - | -35 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; - | +++++++++++++ - -error[E0412]: cannot find type `Runtime` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:37:25 - | -37 | impl pallet::Config for Runtime {} - | ^^^^^^^ not found in this scope - -error[E0412]: cannot find type `Runtime` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:40:31 - | -40 | impl frame_system::Config for Runtime { - | ^^^^^^^ not found in this scope - -error[E0412]: cannot find type `RuntimeOrigin` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:42:23 - | -42 | type RuntimeOrigin = RuntimeOrigin; - | ^^^^^^^^^^^^^ - | -help: you might have meant to use the associated type - | -42 | type RuntimeOrigin = Self::RuntimeOrigin; - | ++++++ - -error[E0412]: cannot find type `RuntimeCall` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:44:21 - | -44 | type RuntimeCall = RuntimeCall; - | ^^^^^^^^^^^ - | -help: you might have meant to use the associated type - | -44 | type RuntimeCall = Self::RuntimeCall; - | ++++++ - -error[E0412]: cannot find type `RuntimeEvent` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:50:22 - | -50 | type RuntimeEvent = RuntimeEvent; - | ^^^^^^^^^^^^ - | -help: you might have meant to use the associated type - | -50 | type RuntimeEvent = Self::RuntimeEvent; - | ++++++ - -error[E0412]: cannot find type `PalletInfo` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:56:20 - | -56 | type PalletInfo = PalletInfo; - | ^^^^^^^^^^ - | -help: you might have meant to use the associated type - | -56 | type PalletInfo = Self::PalletInfo; - | ++++++ -help: consider importing one of these items - | -18 + use frame_benchmarking::__private::traits::PalletInfo; - | -18 + use frame_support::traits::PalletInfo; - | - -error[E0412]: cannot find type `RuntimeTask` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:39:1 - | -39 | #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] - | 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | - = note: this error originates in the macro `frame_system::config_preludes::TestDefaultConfig` which comes from the expansion of the macro `frame_support::macro_magic::forward_tokens_verbatim` (in Nightly builds, run with -Z macro-backtrace for more info) -help: you might have meant to use the associated type - --> $WORKSPACE/substrate/frame/system/src/lib.rs - | - | type Self::RuntimeTask = (); - | ++++++ +error: recursion limit reached while expanding `frame_support::__private::tt_return!` + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:22:1 + | +22 | / #[frame_support::pallet] +23 | | mod pallet { +24 | | #[pallet::config] +25 | | pub trait Config: frame_system::Config {} +... | +66 | |/ construct_runtime! { +67 | || pub struct Runtime +68 | || { +69 | || System: frame_system::{Pallet, Call, Storage, Config, Event}, +... || +180 | || } +181 | || } + | ||_^ + | |_| + | in this macro invocation + | + = help: consider increasing the recursion limit by adding a `#![recursion_limit = "256"]` attribute to your crate (`$CRATE`) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr index a4c7ecf786588a782f96b3ae890c67973c2841fc..30005c07cb631dd48425117507006faa870866cd 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr @@ -53,8 +53,9 @@ error[E0599]: no function or associated item named `is_inherent` found for struc | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope - = note: the following trait defines an item `is_inherent`, perhaps you need to implement it: + = note: the following traits define an item `is_inherent`, perhaps you need to implement one of them: candidate #1: `ProvideInherent` + candidate #2: `IsInherent` = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `check_inherent` found for struct `pallet::Pallet` in the current scope @@ -119,3 +120,23 @@ error[E0599]: no function or associated item named `is_inherent_required` found = note: the following trait defines an item `is_inherent_required`, perhaps you need to implement it: candidate #1: `ProvideInherent` = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `pallet::Pallet: ProvideInherent` is not satisfied + --> tests/construct_runtime_ui/undefined_inherent_part.rs:70:3 + | +70 | Pallet: pallet expanded::{}::{Pallet, Inherent}, + | ^^^^^^ the trait `ProvideInherent` is not implemented for `pallet::Pallet` + +error[E0277]: the trait bound `pallet::Pallet: ProvideInherent` is not satisfied + --> tests/construct_runtime_ui/undefined_inherent_part.rs:66:1 + | +66 | / construct_runtime! 
{ +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, +71 | | } +72 | | } + | |_^ the trait `ProvideInherent` is not implemented for `pallet::Pallet` + | + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/pallet.rs b/substrate/frame/support/test/tests/pallet.rs index 9b4381c2f82bd4baa78fc8d175079a0e90480da3..4d2737d411ee8238124571d5c64260ac3dd79c03 100644 --- a/substrate/frame/support/test/tests/pallet.rs +++ b/substrate/frame/support/test/tests/pallet.rs @@ -209,7 +209,7 @@ pub mod pallet { where T::AccountId: From + From + SomeAssociation1, { - /// Doc comment put in metadata + /// call foo doc comment put in metadata #[pallet::call_index(0)] #[pallet::weight(Weight::from_parts(*foo as u64, 0))] pub fn foo( @@ -225,7 +225,7 @@ pub mod pallet { Ok(().into()) } - /// Doc comment put in metadata + /// call foo_storage_layer doc comment put in metadata #[pallet::call_index(1)] #[pallet::weight({1})] pub fn foo_storage_layer( @@ -270,7 +270,7 @@ pub mod pallet { #[pallet::error] #[derive(PartialEq, Eq)] pub enum Error { - /// doc comment put into metadata + /// error doc comment put in metadata InsufficientProposersBalance, NonExistentStorageValue, Code(u8), @@ -287,9 +287,8 @@ pub mod pallet { where T::AccountId: SomeAssociation1 + From, { - /// doc comment put in metadata + /// event doc comment put in metadata Proposed(::AccountId), - /// doc Spending(BalanceOf), Something(u32), SomethingElse(::_1), @@ -590,7 +589,7 @@ pub mod pallet2 { Self::deposit_event(Event::Something(31)); if UpdateStorageVersion::get() { - Self::current_storage_version().put::(); + Self::in_code_storage_version().put::(); } Weight::zero() @@ -744,14 +743,43 @@ impl pallet5::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; } +#[derive(Clone, Debug, codec::Encode, codec::Decode, PartialEq, Eq, scale_info::TypeInfo)] +pub struct AccountU64(u64); +impl sp_runtime::traits::IdentifyAccount for AccountU64 { + type AccountId = u64; + fn into_account(self) -> u64 { + self.0 + } +} + +impl sp_runtime::traits::Verify for AccountU64 { + type Signer = AccountU64; + fn verify>( + &self, + _msg: L, + _signer: &::AccountId, + ) -> bool { + true + } +} + +impl From for AccountU64 { + fn from(value: u64) -> Self { + Self(value) + } +} + pub type Header = sp_runtime::generic::Header; pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = - sp_runtime::testing::TestXt>; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic< + u64, + RuntimeCall, + AccountU64, + frame_system::CheckNonZeroSender, +>; frame_support::construct_runtime!( - pub struct Runtime - { + pub struct Runtime { // Exclude part `Storage` in order not to check its metadata in tests. 
System: frame_system exclude_parts { Pallet, Storage }, Example: pallet, @@ -772,6 +800,14 @@ fn _ensure_call_is_correctly_excluded_and_included(call: RuntimeCall) { } } +fn maybe_docs(doc: Vec<&'static str>) -> Vec<&'static str> { + if cfg!(feature = "no-metadata-docs") { + vec![] + } else { + doc + } +} + #[test] fn transactional_works() { TestExternalities::default().execute_with(|| { @@ -890,10 +926,8 @@ fn inherent_expand() { let inherents = InherentData::new().create_extrinsics(); - let expected = vec![UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), - signature: None, - }]; + let expected = + vec![UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_no_post_info {}))]; assert_eq!(expected, inherents); let block = Block::new( @@ -905,14 +939,11 @@ fn inherent_expand() { Digest::default(), ), vec![ - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), - signature: None, - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 0 }), - signature: None, - }, + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_no_post_info {})), + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo { + foo: 1, + bar: 0, + })), ], ); @@ -927,14 +958,11 @@ fn inherent_expand() { Digest::default(), ), vec![ - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), - signature: None, - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo { foo: 0, bar: 0 }), - signature: None, - }, + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_no_post_info {})), + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo { + foo: 0, + bar: 0, + })), ], ); @@ -948,10 +976,9 @@ fn inherent_expand() { BlakeTwo256::hash(b"test"), Digest::default(), ), - vec![UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_storage_layer { foo: 0 }), - signature: None, - }], + vec![UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_storage_layer { + foo: 0, + }))], ); let mut inherent = InherentData::new(); @@ -966,10 +993,12 @@ fn inherent_expand() { BlakeTwo256::hash(b"test"), Digest::default(), ), - vec![UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), - signature: Some((1, Default::default())), - }], + vec![UncheckedExtrinsic::new_signed( + RuntimeCall::Example(pallet::Call::foo_no_post_info {}), + 1, + 1.into(), + Default::default(), + )], ); let mut inherent = InherentData::new(); @@ -985,14 +1014,13 @@ fn inherent_expand() { Digest::default(), ), vec![ - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 1 }), - signature: None, - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_storage_layer { foo: 0 }), - signature: None, - }, + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo { + foo: 1, + bar: 1, + })), + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_storage_layer { + foo: 0, + })), ], ); @@ -1007,18 +1035,14 @@ fn inherent_expand() { Digest::default(), ), vec![ - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 1 }), - signature: None, - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_storage_layer { foo: 0 }), - signature: None, - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), - signature: None, - }, + 
UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo { + foo: 1, + bar: 1, + })), + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_storage_layer { + foo: 0, + })), + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_no_post_info {})), ], ); @@ -1033,18 +1057,17 @@ fn inherent_expand() { Digest::default(), ), vec![ - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 1 }), - signature: None, - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 0 }), - signature: Some((1, Default::default())), - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), - signature: None, - }, + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo { + foo: 1, + bar: 1, + })), + UncheckedExtrinsic::new_signed( + RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 0 }), + 1, + 1.into(), + Default::default(), + ), + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_no_post_info {})), ], ); @@ -1310,7 +1333,7 @@ fn pallet_on_genesis() { assert_eq!(pallet::Pallet::::on_chain_storage_version(), StorageVersion::new(0)); pallet::Pallet::::on_genesis(); assert_eq!( - pallet::Pallet::::current_storage_version(), + pallet::Pallet::::in_code_storage_version(), pallet::Pallet::::on_chain_storage_version(), ); }) @@ -1362,19 +1385,47 @@ fn migrate_from_pallet_version_to_storage_version() { }); } +#[test] +fn pallet_item_docs_in_metadata() { + // call + let call_variants = match meta_type::>().type_info().type_def { + scale_info::TypeDef::Variant(variants) => variants.variants, + _ => unreachable!(), + }; + + assert_eq!(call_variants[0].docs, maybe_docs(vec!["call foo doc comment put in metadata"])); + assert_eq!( + call_variants[1].docs, + maybe_docs(vec!["call foo_storage_layer doc comment put in metadata"]) + ); + assert!(call_variants[2].docs.is_empty()); + + // event + let event_variants = match meta_type::>().type_info().type_def { + scale_info::TypeDef::Variant(variants) => variants.variants, + _ => unreachable!(), + }; + + assert_eq!(event_variants[0].docs, maybe_docs(vec!["event doc comment put in metadata"])); + assert!(event_variants[1].docs.is_empty()); + + // error + let error_variants = match meta_type::>().type_info().type_def { + scale_info::TypeDef::Variant(variants) => variants.variants, + _ => unreachable!(), + }; + + assert_eq!(error_variants[0].docs, maybe_docs(vec!["error doc comment put in metadata"])); + assert!(error_variants[1].docs.is_empty()); + + // storage is already covered in the main `fn metadata` test. 
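// For reference, the constructor-based building of `UncheckedExtrinsic` used in
// `inherent_expand` above (a sketch of the calls as they appear in this file, not
// additional test code):
//
//     // bare (inherent / unsigned): previously `UncheckedExtrinsic { call, signature: None }`
//     let _bare = UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_no_post_info {}));
//     // signed: previously `signature: Some((who, extra))`; the arguments are now
//     // (call, address, signature, transaction extension)
//     let _signed = UncheckedExtrinsic::new_signed(
//         RuntimeCall::Example(pallet::Call::foo_no_post_info {}),
//         1,                  // address (u64 in this test runtime)
//         1.into(),           // signature (`AccountU64`)
//         Default::default(), // extension (`CheckNonZeroSender`)
//     );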
+} + #[test] fn metadata() { use codec::Decode; use frame_metadata::{v15::*, *}; - fn maybe_docs(doc: Vec<&'static str>) -> Vec<&'static str> { - if cfg!(feature = "no-metadata-docs") { - vec![] - } else { - doc - } - } - let readme = "Support code for the runtime.\n\nLicense: Apache-2.0\n"; let expected_pallet_doc = vec![" Pallet documentation", readme, readme]; @@ -1899,7 +1950,7 @@ fn extrinsic_metadata_ir_types() { >(), ir.signature_ty ); - assert_eq!(meta_type::<()>(), ir.signature_ty); + assert_eq!(meta_type::(), ir.signature_ty); assert_eq!(meta_type::<<::SignaturePayload as SignaturePayloadT>::SignatureExtra>(), ir.extra_ty); assert_eq!(meta_type::>(), ir.extra_ty); @@ -2257,10 +2308,10 @@ fn pallet_on_chain_storage_version_initializes_correctly() { AllPalletsWithSystem, >; - // Simple example of a pallet with current version 10 being added to the runtime for the first + // Simple example of a pallet with in-code version 10 being added to the runtime for the first // time. TestExternalities::default().execute_with(|| { - let current_version = Example::current_storage_version(); + let in_code_version = Example::in_code_storage_version(); // Check the pallet has no storage items set. let pallet_hashed_prefix = twox_128(Example::name().as_bytes()); @@ -2271,14 +2322,14 @@ fn pallet_on_chain_storage_version_initializes_correctly() { // version. Executive::execute_on_runtime_upgrade(); - // Check that the storage version was initialized to the current version + // Check that the storage version was initialized to the in-code version let on_chain_version_after = StorageVersion::get::(); - assert_eq!(on_chain_version_after, current_version); + assert_eq!(on_chain_version_after, in_code_version); }); - // Pallet with no current storage version should have the on-chain version initialized to 0. + // Pallet with no in-code storage version should have the on-chain version initialized to 0. TestExternalities::default().execute_with(|| { - // Example4 current_storage_version is NoStorageVersionSet. + // Example4 in_code_storage_version is NoStorageVersionSet. // Check the pallet has no storage items set. let pallet_hashed_prefix = twox_128(Example4::name().as_bytes()); @@ -2308,7 +2359,7 @@ fn post_runtime_upgrade_detects_storage_version_issues() { impl OnRuntimeUpgrade for CustomUpgrade { fn on_runtime_upgrade() -> Weight { - Example2::current_storage_version().put::(); + Example2::in_code_storage_version().put::(); Default::default() } @@ -2351,14 +2402,14 @@ fn post_runtime_upgrade_detects_storage_version_issues() { >; TestExternalities::default().execute_with(|| { - // Set the on-chain version to one less than the current version for `Example`, simulating a + // Set the on-chain version to one less than the in-code version for `Example`, simulating a // forgotten migration StorageVersion::new(9).put::(); // The version isn't changed, we should detect it. assert!( Executive::try_runtime_upgrade(UpgradeCheckSelect::PreAndPost).unwrap_err() == - "On chain and current storage version do not match. Missing runtime upgrade?" + "On chain and in-code storage version do not match. Missing runtime upgrade?" 
.into() ); }); diff --git a/substrate/frame/support/test/tests/pallet_instance.rs b/substrate/frame/support/test/tests/pallet_instance.rs index f8cc97623b8de219900e5d436a2b00d8f3c6fa91..505400a7c217df1321c2a5f5a3746b6b4bc1d671 100644 --- a/substrate/frame/support/test/tests/pallet_instance.rs +++ b/substrate/frame/support/test/tests/pallet_instance.rs @@ -943,7 +943,7 @@ fn metadata() { ty: scale_info::meta_type::(), version: 4, signed_extensions: vec![SignedExtensionMetadata { - identifier: "UnitSignedExtension", + identifier: "UnitTransactionExtension", ty: scale_info::meta_type::<()>(), additional_signed: scale_info::meta_type::<()>(), }], diff --git a/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs b/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs index 79e9d6786717a5a27717cda754575c2ecd478aa3..33b96dea9486396629e19274de8437f0ca3c3162 100644 --- a/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs +++ b/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs @@ -90,6 +90,7 @@ fn module_error_outer_enum_expand_explicit() { frame_system::Error::NonDefaultComposite => (), frame_system::Error::NonZeroRefCount => (), frame_system::Error::CallFiltered => (), + frame_system::Error::MultiBlockMigrationsOngoing => (), #[cfg(feature = "experimental")] frame_system::Error::InvalidTask => (), #[cfg(feature = "experimental")] diff --git a/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs b/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs index 4bd8ee0bb39a574b2ff3f59b571c1209fc675da4..db006fe79359aa9f096963ce9333933329c23508 100644 --- a/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs +++ b/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs @@ -90,6 +90,7 @@ fn module_error_outer_enum_expand_implicit() { frame_system::Error::NonDefaultComposite => (), frame_system::Error::NonZeroRefCount => (), frame_system::Error::CallFiltered => (), + frame_system::Error::MultiBlockMigrationsOngoing => (), #[cfg(feature = "experimental")] frame_system::Error::InvalidTask => (), #[cfg(feature = "experimental")] diff --git a/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.rs b/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.rs index 840a6dee20ccc7a836619a1ef334cca6e22e7887..d2ca9fc80991a87dbb6f20323f07d8adcd29449d 100644 --- a/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.rs +++ b/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.rs @@ -29,7 +29,7 @@ mod pallet { #[pallet::hooks] impl Hooks> for Pallet { fn on_runtime_upgrade() -> Weight { - if Self::current_storage_version() != Self::on_chain_storage_version() { + if Self::in_code_storage_version() != Self::on_chain_storage_version() { } diff --git a/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.stderr b/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.stderr index 1b48197cc9ed1049ea0f52bb011fbc95654c59c0..3256e69528a2d28527ccfe1fa4d77dadfc77baa1 100644 --- a/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.stderr @@ -1,7 +1,7 @@ error[E0369]: binary operation `!=` cannot be applied to type `NoStorageVersionSet` --> tests/pallet_ui/compare_unset_storage_version.rs:32:39 | -32 | if Self::current_storage_version() != Self::on_chain_storage_version() { +32 | if 
Self::in_code_storage_version() != Self::on_chain_storage_version() { | ------------------------------- ^^ -------------------------------- StorageVersion | | | NoStorageVersionSet diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr index 7f125526edf26abddf82f2228da9a510200f0735..519fadaa6049cb26430719379d3f9847b81efb62 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr @@ -1,4 +1,4 @@ -error: expected one of: `getter`, `storage_prefix`, `unbounded`, `whitelist_storage` +error: expected one of: `getter`, `storage_prefix`, `unbounded`, `whitelist_storage`, `disable_try_decode_storage` --> tests/pallet_ui/storage_invalid_attribute.rs:33:12 | 33 | #[pallet::generate_store(pub trait Store)] diff --git a/substrate/frame/support/test/tests/runtime_metadata.rs b/substrate/frame/support/test/tests/runtime_metadata.rs index bb7f7d2822e7cf1a15c41de1ca47686dd8440bfc..40e70b219ba94062af2bb75391628c73bc3a3233 100644 --- a/substrate/frame/support/test/tests/runtime_metadata.rs +++ b/substrate/frame/support/test/tests/runtime_metadata.rs @@ -101,7 +101,7 @@ sp_api::impl_runtime_apis! { fn execute_block(_: Block) { unimplemented!() } - fn initialize_block(_: &::Header) { + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } @@ -200,8 +200,8 @@ fn runtime_metadata() { name: "header", ty: meta_type::<&::Header>(), }], - output: meta_type::<()>(), - docs: maybe_docs(vec![" Initialize a block with the given header."]), + output: meta_type::(), + docs: maybe_docs(vec![" Initialize a block with the given header and return the runtime executive mode."]), }, ], docs: maybe_docs(vec![ diff --git a/substrate/frame/system/benchmarking/src/extensions.rs b/substrate/frame/system/benchmarking/src/extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..1721501dead49756e7ad6c39187e43e69209517c --- /dev/null +++ b/substrate/frame/system/benchmarking/src/extensions.rs @@ -0,0 +1,213 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
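// Registering these benchmarks is left to downstream runtimes; one plausible
// wiring (the `SystemExtensionsBench` alias is illustrative and not part of
// this diff) would be:
//
//     use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench;
//
//     frame_benchmarking::define_benchmarks!(
//         [frame_system_extensions, SystemExtensionsBench::<Runtime>]
//         // ... the runtime's other benchmark targets
//     );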
+ +// Benchmarks for System Extensions + +#![cfg(feature = "runtime-benchmarks")] + +use frame_benchmarking::{account, impl_benchmark_test_suite, v2::*, BenchmarkError}; +use frame_support::{ + dispatch::{DispatchClass, DispatchInfo, PostDispatchInfo}, + weights::Weight, +}; +use frame_system::{ + pallet_prelude::*, CheckGenesis, CheckMortality, CheckNonZeroSender, CheckNonce, + CheckSpecVersion, CheckTxVersion, CheckWeight, Config, Pallet as System, RawOrigin, +}; +use sp_runtime::{ + generic::Era, + traits::{AsSystemOriginSigner, DispatchTransaction, Dispatchable, Get}, +}; +use sp_std::prelude::*; + +pub struct Pallet(System); + +#[benchmarks(where + T: Send + Sync, + T::RuntimeCall: Dispatchable, + ::RuntimeOrigin: AsSystemOriginSigner + Clone) +] +mod benchmarks { + use super::*; + + #[benchmark] + fn check_genesis() -> Result<(), BenchmarkError> { + let len = 0_usize; + let caller = account("caller", 0, 0); + let info = DispatchInfo { weight: Weight::zero(), ..Default::default() }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + + #[block] + { + CheckGenesis::::new() + .test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| Ok(().into())) + .unwrap() + .unwrap(); + } + + Ok(()) + } + + #[benchmark] + fn check_mortality() -> Result<(), BenchmarkError> { + let len = 0_usize; + let ext = CheckMortality::::from(Era::mortal(16, 256)); + let block_number: BlockNumberFor = 17u32.into(); + System::::set_block_number(block_number); + let prev_block: BlockNumberFor = 16u32.into(); + let default_hash: T::Hash = Default::default(); + frame_system::BlockHash::::insert(prev_block, default_hash); + let caller = account("caller", 0, 0); + let info = DispatchInfo { + weight: Weight::from_parts(100, 0), + class: DispatchClass::Normal, + ..Default::default() + }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + + #[block] + { + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| Ok(().into())) + .unwrap() + .unwrap(); + } + Ok(()) + } + + #[benchmark] + fn check_non_zero_sender() -> Result<(), BenchmarkError> { + let len = 0_usize; + let ext = CheckNonZeroSender::::new(); + let caller = account("caller", 0, 0); + let info = DispatchInfo { weight: Weight::zero(), ..Default::default() }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + + #[block] + { + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| Ok(().into())) + .unwrap() + .unwrap(); + } + Ok(()) + } + + #[benchmark] + fn check_nonce() -> Result<(), BenchmarkError> { + let caller: T::AccountId = account("caller", 0, 0); + let mut info = frame_system::AccountInfo::default(); + info.nonce = 1u32.into(); + info.providers = 1; + let expected_nonce = info.nonce + 1u32.into(); + frame_system::Account::::insert(caller.clone(), info); + let len = 0_usize; + let ext = CheckNonce::::from(1u32.into()); + let info = DispatchInfo { weight: Weight::zero(), ..Default::default() }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + + #[block] + { + ext.test_run(RawOrigin::Signed(caller.clone()).into(), &call, &info, len, |_| { + Ok(().into()) + }) + .unwrap() + .unwrap(); + } + + let updated_info = frame_system::Account::::get(caller.clone()); + assert_eq!(updated_info.nonce, expected_nonce); + Ok(()) + } + + #[benchmark] + fn check_spec_version() -> Result<(), BenchmarkError> { + let len = 0_usize; + let caller = account("caller", 0, 0); + let info = DispatchInfo { weight: 
Weight::zero(), ..Default::default() }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + + #[block] + { + CheckSpecVersion::::new() + .test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| Ok(().into())) + .unwrap() + .unwrap(); + } + Ok(()) + } + + #[benchmark] + fn check_tx_version() -> Result<(), BenchmarkError> { + let len = 0_usize; + let caller = account("caller", 0, 0); + let info = DispatchInfo { weight: Weight::zero(), ..Default::default() }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + + #[block] + { + CheckTxVersion::::new() + .test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| Ok(().into())) + .unwrap() + .unwrap(); + } + Ok(()) + } + + #[benchmark] + fn check_weight() -> Result<(), BenchmarkError> { + let caller = account("caller", 0, 0); + let base_extrinsic = ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic; + let info = DispatchInfo { + weight: Weight::from_parts(base_extrinsic.ref_time() * 5, 0), + class: DispatchClass::Normal, + ..Default::default() + }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(base_extrinsic.ref_time() * 2, 0)), + pays_fee: Default::default(), + }; + let len = 0_usize; + let base_extrinsic = ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic; + + let ext = CheckWeight::::new(); + + let initial_block_weight = Weight::from_parts(base_extrinsic.ref_time() * 2, 0); + frame_system::BlockWeight::::mutate(|current_weight| { + current_weight.set(Weight::zero(), DispatchClass::Mandatory); + current_weight.set(initial_block_weight, DispatchClass::Normal); + }); + + #[block] + { + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| Ok(post_info)) + .unwrap() + .unwrap(); + } + + assert_eq!( + System::::block_weight().total(), + initial_block_weight + base_extrinsic + post_info.actual_weight.unwrap(), + ); + Ok(()) + } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test,); +} diff --git a/substrate/frame/system/benchmarking/src/lib.rs b/substrate/frame/system/benchmarking/src/lib.rs index 18bfb85f52dfd4c2b6a5c8a57e78494d97fbdeeb..ebd16275bcbf0d4f1d84cc20da80fe97905816f8 100644 --- a/substrate/frame/system/benchmarking/src/lib.rs +++ b/substrate/frame/system/benchmarking/src/lib.rs @@ -28,6 +28,7 @@ use sp_core::storage::well_known_keys; use sp_runtime::traits::Hash; use sp_std::{prelude::*, vec}; +pub mod extensions; mod mock; pub struct Pallet(System); diff --git a/substrate/frame/system/benchmarking/src/mock.rs b/substrate/frame/system/benchmarking/src/mock.rs index edbf74a9a51ac9bd90ee65f208c7d96be08430e3..1f84dd530040619f4b902242edf29002f383c073 100644 --- a/substrate/frame/system/benchmarking/src/mock.rs +++ b/substrate/frame/system/benchmarking/src/mock.rs @@ -57,6 +57,7 @@ impl frame_system::Config for Test { type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type ExtensionsWeightInfo = (); type SS58Prefix = (); type OnSetCode = (); type MaxConsumers = frame_support::traits::ConstU32<16>; diff --git a/substrate/frame/system/src/extensions/check_genesis.rs b/substrate/frame/system/src/extensions/check_genesis.rs index 76a711a823e7d7a4b1092d9220b846584b21603f..3f11f745cd9b7c509a6827100acae66413c80e5d 100644 --- a/substrate/frame/system/src/extensions/check_genesis.rs +++ 
b/substrate/frame/system/src/extensions/check_genesis.rs @@ -19,7 +19,8 @@ use crate::{pallet_prelude::BlockNumberFor, Config, Pallet}; use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, SignedExtension, Zero}, + impl_tx_ext_default, + traits::{TransactionExtension, TransactionExtensionBase, Zero}, transaction_validity::TransactionValidityError, }; @@ -46,30 +47,26 @@ impl sp_std::fmt::Debug for CheckGenesis { } impl CheckGenesis { - /// Creates new `SignedExtension` to check genesis hash. + /// Creates new `TransactionExtension` to check genesis hash. pub fn new() -> Self { Self(sp_std::marker::PhantomData) } } -impl SignedExtension for CheckGenesis { - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = T::Hash; - type Pre = (); +impl TransactionExtensionBase for CheckGenesis { const IDENTIFIER: &'static str = "CheckGenesis"; - - fn additional_signed(&self) -> Result { + type Implicit = T::Hash; + fn implicit(&self) -> Result { Ok(>::block_hash(BlockNumberFor::::zero())) } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) + fn weight(&self) -> sp_weights::Weight { + ::check_genesis() } } +impl TransactionExtension + for CheckGenesis +{ + type Val = (); + type Pre = (); + impl_tx_ext_default!(T::RuntimeCall; Context; validate prepare); +} diff --git a/substrate/frame/system/src/extensions/check_mortality.rs b/substrate/frame/system/src/extensions/check_mortality.rs index 148dfd4aad471b8a51aa3106581531b17090c20a..deb44880d6441429cdd1513bdc4ac0fbfc6c0421 100644 --- a/substrate/frame/system/src/extensions/check_mortality.rs +++ b/substrate/frame/system/src/extensions/check_mortality.rs @@ -20,10 +20,12 @@ use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::{ generic::Era, - traits::{DispatchInfoOf, SaturatedConversion, SignedExtension}, - transaction_validity::{ - InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, + impl_tx_ext_default, + traits::{ + DispatchInfoOf, SaturatedConversion, TransactionExtension, TransactionExtensionBase, + ValidateResult, }, + transaction_validity::{InvalidTransaction, TransactionValidityError, ValidTransaction}, }; /// Check for transaction mortality. 
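The rewritten `validate` below keeps a mortal transaction alive until its era's death block, while `implicit` pins it to the hash of its birth block. A minimal sketch of that arithmetic, assuming only `sp_runtime` as a dependency:

    use sp_runtime::generic::Era;

    fn main() {
        // An era with period 4, created around block 12 (mirrors the unit test below).
        let era = Era::mortal(4, 12);
        let now = 13u64;
        // `CheckMortality::validate` returns `longevity = death(now) - now`.
        let longevity = era.death(now).saturating_sub(now);
        // `implicit()` looks up the block hash at `birth(now)`; if that block is no
        // longer in `BlockHash`, the extension fails with `AncientBirthBlock`.
        println!("birth = {}, longevity = {}", era.birth(now), longevity);
    }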
@@ -54,29 +56,11 @@ impl sp_std::fmt::Debug for CheckMortality { } } -impl SignedExtension for CheckMortality { - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = T::Hash; - type Pre = (); +impl TransactionExtensionBase for CheckMortality { const IDENTIFIER: &'static str = "CheckMortality"; + type Implicit = T::Hash; - fn validate( - &self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> TransactionValidity { - let current_u64 = >::block_number().saturated_into::(); - let valid_till = self.0.death(current_u64); - Ok(ValidTransaction { - longevity: valid_till.saturating_sub(current_u64), - ..Default::default() - }) - } - - fn additional_signed(&self) -> Result { + fn implicit(&self) -> Result { let current_u64 = >::block_number().saturated_into::(); let n = self.0.birth(current_u64).saturated_into::>(); if !>::contains_key(n) { @@ -85,16 +69,38 @@ impl SignedExtension for CheckMortality { Ok(>::block_hash(n)) } } + fn weight(&self) -> sp_weights::Weight { + ::check_mortality() + } +} +impl TransactionExtension + for CheckMortality +{ + type Pre = (); + type Val = (); - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) + fn validate( + &self, + origin: ::RuntimeOrigin, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, + _len: usize, + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> ValidateResult { + let current_u64 = >::block_number().saturated_into::(); + let valid_till = self.0.death(current_u64); + Ok(( + ValidTransaction { + longevity: valid_till.saturating_sub(current_u64), + ..Default::default() + }, + (), + origin, + )) } + impl_tx_ext_default!(T::RuntimeCall; Context; prepare); } #[cfg(test)] @@ -106,23 +112,21 @@ mod tests { weights::Weight, }; use sp_core::H256; + use sp_runtime::traits::DispatchTransaction; #[test] fn signed_ext_check_era_should_work() { new_test_ext().execute_with(|| { // future assert_eq!( - CheckMortality::::from(Era::mortal(4, 2)) - .additional_signed() - .err() - .unwrap(), + CheckMortality::::from(Era::mortal(4, 2)).implicit().err().unwrap(), InvalidTransaction::AncientBirthBlock.into(), ); // correct System::set_block_number(13); >::insert(12, H256::repeat_byte(1)); - assert!(CheckMortality::::from(Era::mortal(4, 12)).additional_signed().is_ok()); + assert!(CheckMortality::::from(Era::mortal(4, 12)).implicit().is_ok()); }) } @@ -142,7 +146,10 @@ mod tests { System::set_block_number(17); >::insert(16, H256::repeat_byte(1)); - assert_eq!(ext.validate(&1, CALL, &normal, len).unwrap().longevity, 15); + assert_eq!( + ext.validate_only(Some(1).into(), CALL, &normal, len).unwrap().0.longevity, + 15 + ); }) } } diff --git a/substrate/frame/system/src/extensions/check_non_zero_sender.rs b/substrate/frame/system/src/extensions/check_non_zero_sender.rs index 92eed60fc66b53dec19a86c1fac20f1af8ff4d7b..115ffac9b038a722c404cb714442306dc704a75d 100644 --- a/substrate/frame/system/src/extensions/check_non_zero_sender.rs +++ b/substrate/frame/system/src/extensions/check_non_zero_sender.rs @@ -17,13 +17,14 @@ use crate::Config; use codec::{Decode, Encode}; -use frame_support::{dispatch::DispatchInfo, DefaultNoBound}; +use frame_support::{traits::OriginTrait, DefaultNoBound}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, SignedExtension}, - transaction_validity::{ - 
InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, + impl_tx_ext_default, + traits::{ + transaction_extension::TransactionExtensionBase, DispatchInfoOf, TransactionExtension, }, + transaction_validity::InvalidTransaction, }; use sp_std::{marker::PhantomData, prelude::*}; @@ -45,66 +46,82 @@ impl sp_std::fmt::Debug for CheckNonZeroSender { } impl CheckNonZeroSender { - /// Create new `SignedExtension` to check runtime version. + /// Create new `TransactionExtension` to check runtime version. pub fn new() -> Self { Self(sp_std::marker::PhantomData) } } -impl SignedExtension for CheckNonZeroSender -where - T::RuntimeCall: Dispatchable, -{ - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); - type Pre = (); +impl TransactionExtensionBase for CheckNonZeroSender { const IDENTIFIER: &'static str = "CheckNonZeroSender"; - - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { - Ok(()) + type Implicit = (); + fn weight(&self) -> sp_weights::Weight { + ::check_non_zero_sender() } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) - } - +} +impl TransactionExtension + for CheckNonZeroSender +{ + type Val = (); + type Pre = (); fn validate( &self, - who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, _len: usize, - ) -> TransactionValidity { - if who.using_encoded(|d| d.iter().all(|x| *x == 0)) { - return Err(TransactionValidityError::Invalid(InvalidTransaction::BadSigner)) + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> sp_runtime::traits::ValidateResult { + if let Some(who) = origin.as_system_signer() { + if who.using_encoded(|d| d.iter().all(|x| *x == 0)) { + return Err(InvalidTransaction::BadSigner.into()) + } } - Ok(ValidTransaction::default()) + Ok((Default::default(), (), origin)) } + impl_tx_ext_default!(T::RuntimeCall; Context; prepare); } #[cfg(test)] mod tests { use super::*; use crate::mock::{new_test_ext, Test, CALL}; - use frame_support::{assert_noop, assert_ok}; + use frame_support::{assert_ok, dispatch::DispatchInfo}; + use sp_runtime::{traits::DispatchTransaction, TransactionValidityError}; #[test] fn zero_account_ban_works() { new_test_ext().execute_with(|| { let info = DispatchInfo::default(); let len = 0_usize; - assert_noop!( - CheckNonZeroSender::::new().validate(&0, CALL, &info, len), - InvalidTransaction::BadSigner + assert_eq!( + CheckNonZeroSender::::new() + .validate_only(Some(0).into(), CALL, &info, len) + .unwrap_err(), + TransactionValidityError::from(InvalidTransaction::BadSigner) ); - assert_ok!(CheckNonZeroSender::::new().validate(&1, CALL, &info, len)); + assert_ok!(CheckNonZeroSender::::new().validate_only( + Some(1).into(), + CALL, + &info, + len + )); + }) + } + + #[test] + fn unsigned_origin_works() { + new_test_ext().execute_with(|| { + let info = DispatchInfo::default(); + let len = 0_usize; + assert_ok!(CheckNonZeroSender::::new().validate_only( + None.into(), + CALL, + &info, + len + )); }) } } diff --git a/substrate/frame/system/src/extensions/check_nonce.rs b/substrate/frame/system/src/extensions/check_nonce.rs index 7504a814aef13b0230e1067c0d95e0104ad74275..ead9d9f7c8177ca1392b092b9418b3b2ae0b93ea 100644 --- a/substrate/frame/system/src/extensions/check_nonce.rs +++ 
b/substrate/frame/system/src/extensions/check_nonce.rs @@ -15,16 +15,19 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::Config; +use crate::{AccountInfo, Config}; use codec::{Decode, Encode}; use frame_support::dispatch::DispatchInfo; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, One, SignedExtension, Zero}, + traits::{ + AsSystemOriginSigner, DispatchInfoOf, Dispatchable, One, TransactionExtension, + TransactionExtensionBase, ValidateResult, Zero, + }, transaction_validity::{ - InvalidTransaction, TransactionLongevity, TransactionValidity, TransactionValidityError, - ValidTransaction, + InvalidTransaction, TransactionLongevity, TransactionValidityError, ValidTransaction, }, + Saturating, }; use sp_std::vec; @@ -58,75 +61,78 @@ impl sp_std::fmt::Debug for CheckNonce { } } -impl SignedExtension for CheckNonce +impl TransactionExtensionBase for CheckNonce { + const IDENTIFIER: &'static str = "CheckNonce"; + type Implicit = (); + fn weight(&self) -> sp_weights::Weight { + ::check_nonce() + } +} +impl TransactionExtension for CheckNonce where T::RuntimeCall: Dispatchable, + ::RuntimeOrigin: AsSystemOriginSigner + Clone, { - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); + type Val = Option<(T::AccountId, AccountInfo)>; type Pre = (); - const IDENTIFIER: &'static str = "CheckNonce"; - - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { - Ok(()) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> Result<(), TransactionValidityError> { - let mut account = crate::Account::::get(who); - if account.providers.is_zero() && account.sufficients.is_zero() { - // Nonce storage not paid for - return Err(InvalidTransaction::Payment.into()) - } - if self.0 != account.nonce { - return Err(if self.0 < account.nonce { - InvalidTransaction::Stale - } else { - InvalidTransaction::Future - } - .into()) - } - account.nonce += T::Nonce::one(); - crate::Account::::insert(who, account); - Ok(()) - } fn validate( &self, - who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, _len: usize, - ) -> TransactionValidity { + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> ValidateResult { + let Some(who) = origin.as_system_origin_signer() else { + return Ok((Default::default(), None, origin)) + }; let account = crate::Account::::get(who); if account.providers.is_zero() && account.sufficients.is_zero() { // Nonce storage not paid for - return InvalidTransaction::Payment.into() + return Err(InvalidTransaction::Payment.into()) } if self.0 < account.nonce { - return InvalidTransaction::Stale.into() + return Err(InvalidTransaction::Stale.into()) } - let provides = vec![Encode::encode(&(who, self.0))]; + let provides = vec![Encode::encode(&(who.clone(), self.0))]; let requires = if account.nonce < self.0 { - vec![Encode::encode(&(who, self.0 - One::one()))] + vec![Encode::encode(&(who.clone(), self.0.saturating_sub(One::one())))] } else { vec![] }; - Ok(ValidTransaction { + let validity = ValidTransaction { priority: 0, requires, provides, longevity: TransactionLongevity::max_value(), propagate: true, - }) + }; + + Ok((validity, Some((who.clone(), account)), origin)) + } + + fn prepare( + self, + val: Self::Val, + 
_origin: &T::RuntimeOrigin, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, + _len: usize, + _context: &Context, + ) -> Result { + let Some((who, mut account)) = val else { return Ok(()) }; + // `self.0 < account.nonce` already checked in `validate`. + if self.0 > account.nonce { + return Err(InvalidTransaction::Future.into()) + } + account.nonce.saturating_inc(); + crate::Account::::insert(who, account); + Ok(()) } } @@ -134,7 +140,8 @@ where mod tests { use super::*; use crate::mock::{new_test_ext, Test, CALL}; - use frame_support::{assert_noop, assert_ok}; + use frame_support::assert_ok; + use sp_runtime::traits::DispatchTransaction; #[test] fn signed_ext_check_nonce_works() { @@ -152,22 +159,33 @@ mod tests { let info = DispatchInfo::default(); let len = 0_usize; // stale - assert_noop!( - CheckNonce::(0).validate(&1, CALL, &info, len), - InvalidTransaction::Stale + assert_eq!( + CheckNonce::(0) + .validate_only(Some(1).into(), CALL, &info, len,) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Stale) ); - assert_noop!( - CheckNonce::(0).pre_dispatch(&1, CALL, &info, len), - InvalidTransaction::Stale + assert_eq!( + CheckNonce::(0) + .validate_and_prepare(Some(1).into(), CALL, &info, len) + .unwrap_err(), + InvalidTransaction::Stale.into() ); // correct - assert_ok!(CheckNonce::(1).validate(&1, CALL, &info, len)); - assert_ok!(CheckNonce::(1).pre_dispatch(&1, CALL, &info, len)); + assert_ok!(CheckNonce::(1).validate_only(Some(1).into(), CALL, &info, len)); + assert_ok!(CheckNonce::(1).validate_and_prepare( + Some(1).into(), + CALL, + &info, + len + )); // future - assert_ok!(CheckNonce::(5).validate(&1, CALL, &info, len)); - assert_noop!( - CheckNonce::(5).pre_dispatch(&1, CALL, &info, len), - InvalidTransaction::Future + assert_ok!(CheckNonce::(5).validate_only(Some(1).into(), CALL, &info, len)); + assert_eq!( + CheckNonce::(5) + .validate_and_prepare(Some(1).into(), CALL, &info, len) + .unwrap_err(), + InvalidTransaction::Future.into() ); }) } @@ -198,20 +216,44 @@ mod tests { let info = DispatchInfo::default(); let len = 0_usize; // Both providers and sufficients zero - assert_noop!( - CheckNonce::(1).validate(&1, CALL, &info, len), - InvalidTransaction::Payment + assert_eq!( + CheckNonce::(1) + .validate_only(Some(1).into(), CALL, &info, len) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Payment) ); - assert_noop!( - CheckNonce::(1).pre_dispatch(&1, CALL, &info, len), - InvalidTransaction::Payment + assert_eq!( + CheckNonce::(1) + .validate_and_prepare(Some(1).into(), CALL, &info, len) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Payment) ); // Non-zero providers - assert_ok!(CheckNonce::(1).validate(&2, CALL, &info, len)); - assert_ok!(CheckNonce::(1).pre_dispatch(&2, CALL, &info, len)); + assert_ok!(CheckNonce::(1).validate_only(Some(2).into(), CALL, &info, len)); + assert_ok!(CheckNonce::(1).validate_and_prepare( + Some(2).into(), + CALL, + &info, + len + )); // Non-zero sufficients - assert_ok!(CheckNonce::(1).validate(&3, CALL, &info, len)); - assert_ok!(CheckNonce::(1).pre_dispatch(&3, CALL, &info, len)); + assert_ok!(CheckNonce::(1).validate_only(Some(3).into(), CALL, &info, len)); + assert_ok!(CheckNonce::(1).validate_and_prepare( + Some(3).into(), + CALL, + &info, + len + )); + }) + } + + #[test] + fn unsigned_check_nonce_works() { + new_test_ext().execute_with(|| { + let info = DispatchInfo::default(); + let len = 0_usize; + assert_ok!(CheckNonce::(1).validate_only(None.into(), CALL, &info, 
len)); + assert_ok!(CheckNonce::(1).validate_and_prepare(None.into(), CALL, &info, len)); }) } } diff --git a/substrate/frame/system/src/extensions/check_spec_version.rs b/substrate/frame/system/src/extensions/check_spec_version.rs index 24d5ef9cafb17b0dd8c1ac02fc127f7068cf6cca..3109708f48efe467b6c727c6813db2275b70c548 100644 --- a/substrate/frame/system/src/extensions/check_spec_version.rs +++ b/substrate/frame/system/src/extensions/check_spec_version.rs @@ -19,7 +19,8 @@ use crate::{Config, Pallet}; use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, SignedExtension}, + impl_tx_ext_default, + traits::{transaction_extension::TransactionExtensionBase, TransactionExtension}, transaction_validity::TransactionValidityError, }; @@ -46,30 +47,26 @@ impl sp_std::fmt::Debug for CheckSpecVersion { } impl CheckSpecVersion { - /// Create new `SignedExtension` to check runtime version. + /// Create new `TransactionExtension` to check runtime version. pub fn new() -> Self { Self(sp_std::marker::PhantomData) } } -impl SignedExtension for CheckSpecVersion { - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = u32; - type Pre = (); +impl TransactionExtensionBase for CheckSpecVersion { const IDENTIFIER: &'static str = "CheckSpecVersion"; - - fn additional_signed(&self) -> Result { + type Implicit = u32; + fn implicit(&self) -> Result { Ok(>::runtime_version().spec_version) } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) + fn weight(&self) -> sp_weights::Weight { + ::check_spec_version() } } +impl TransactionExtension<::RuntimeCall, Context> + for CheckSpecVersion +{ + type Val = (); + type Pre = (); + impl_tx_ext_default!(::RuntimeCall; Context; validate prepare); +} diff --git a/substrate/frame/system/src/extensions/check_tx_version.rs b/substrate/frame/system/src/extensions/check_tx_version.rs index 3f9d6a1903fe1d08d05266a46625aeacba3c273c..ee1d507e7bc83d8bd90f08d1fb04f7b67842a011 100644 --- a/substrate/frame/system/src/extensions/check_tx_version.rs +++ b/substrate/frame/system/src/extensions/check_tx_version.rs @@ -19,7 +19,8 @@ use crate::{Config, Pallet}; use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, SignedExtension}, + impl_tx_ext_default, + traits::{transaction_extension::TransactionExtensionBase, TransactionExtension}, transaction_validity::TransactionValidityError, }; @@ -46,29 +47,26 @@ impl sp_std::fmt::Debug for CheckTxVersion { } impl CheckTxVersion { - /// Create new `SignedExtension` to check transaction version. + /// Create new `TransactionExtension` to check transaction version. 
pub fn new() -> Self { Self(sp_std::marker::PhantomData) } } -impl SignedExtension for CheckTxVersion { - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = u32; - type Pre = (); +impl TransactionExtensionBase for CheckTxVersion { const IDENTIFIER: &'static str = "CheckTxVersion"; - - fn additional_signed(&self) -> Result { + type Implicit = u32; + fn implicit(&self) -> Result { Ok(>::runtime_version().transaction_version) } - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) + fn weight(&self) -> sp_weights::Weight { + ::check_tx_version() } } +impl TransactionExtension<::RuntimeCall, Context> + for CheckTxVersion +{ + type Val = (); + type Pre = (); + impl_tx_ext_default!(::RuntimeCall; Context; validate prepare); +} diff --git a/substrate/frame/system/src/extensions/check_weight.rs b/substrate/frame/system/src/extensions/check_weight.rs index 70d1e75633278c665f4c06ca22f02a96668ef964..5c35acfb3f355c6784b4f6f9e2ca1269463346f0 100644 --- a/substrate/frame/system/src/extensions/check_weight.rs +++ b/substrate/frame/system/src/extensions/check_weight.rs @@ -23,9 +23,12 @@ use frame_support::{ }; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension}, - transaction_validity::{InvalidTransaction, TransactionValidity, TransactionValidityError}, - DispatchResult, + traits::{ + DispatchInfoOf, Dispatchable, PostDispatchInfoOf, TransactionExtension, + TransactionExtensionBase, ValidateResult, + }, + transaction_validity::{InvalidTransaction, TransactionValidityError}, + DispatchResult, ValidTransaction, }; use sp_weights::Weight; @@ -100,39 +103,43 @@ where } } - /// Creates new `SignedExtension` to check weight of the extrinsic. + /// Creates new `TransactionExtension` to check weight of the extrinsic. pub fn new() -> Self { Self(Default::default()) } - /// Do the pre-dispatch checks. This can be applied to both signed and unsigned. + /// Do the validate checks. This can be applied to both signed and unsigned. /// - /// It checks and notes the new weight and length. - pub fn do_pre_dispatch( + /// It only checks that the block weight and length limit will not exceed. + /// + /// Returns the transaction validity and the next block length, to be used in `prepare`. + pub fn do_validate( info: &DispatchInfoOf, len: usize, - ) -> Result<(), TransactionValidityError> { + ) -> Result<(ValidTransaction, u32), TransactionValidityError> { + // ignore the next length. If they return `Ok`, then it is below the limit. let next_len = Self::check_block_length(info, len)?; - let next_weight = Self::check_block_weight(info)?; + // during validation we skip block limit check. Since the `validate_transaction` + // call runs on an empty block anyway, by this we prevent `on_initialize` weight + // consumption from causing false negatives. Self::check_extrinsic_weight(info)?; - crate::AllExtrinsicsLen::::put(next_len); - crate::BlockWeight::::put(next_weight); - Ok(()) + Ok((Default::default(), next_len)) } - /// Do the validate checks. This can be applied to both signed and unsigned. + /// Do the pre-dispatch checks. This can be applied to both signed and unsigned. /// - /// It only checks that the block weight and length limit will not exceed. - pub fn do_validate(info: &DispatchInfoOf, len: usize) -> TransactionValidity { - // ignore the next length. 
If they return `Ok`, then it is below the limit. - let _ = Self::check_block_length(info, len)?; - // during validation we skip block limit check. Since the `validate_transaction` - // call runs on an empty block anyway, by this we prevent `on_initialize` weight - // consumption from causing false negatives. - Self::check_extrinsic_weight(info)?; + /// It checks and notes the new weight and length. + pub fn do_prepare( + info: &DispatchInfoOf, + next_len: u32, + ) -> Result<(), TransactionValidityError> { + let next_weight = Self::check_block_weight(info)?; + // Extrinsic weight already checked in `validate`. - Ok(Default::default()) + crate::AllExtrinsicsLen::::put(next_len); + crate::BlockWeight::::put(next_weight); + Ok(()) } } @@ -201,62 +208,55 @@ where Ok(all_weight) } -impl SignedExtension for CheckWeight +impl TransactionExtensionBase for CheckWeight { + const IDENTIFIER: &'static str = "CheckWeight"; + type Implicit = (); + + fn weight(&self) -> Weight { + ::check_weight() + } +} +impl TransactionExtension + for CheckWeight where T::RuntimeCall: Dispatchable, { - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); type Pre = (); - const IDENTIFIER: &'static str = "CheckWeight"; - - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { - Ok(()) - } - - fn pre_dispatch( - self, - _who: &Self::AccountId, - _call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result<(), TransactionValidityError> { - Self::do_pre_dispatch(info, len) - } + type Val = u32; /* next block length */ fn validate( &self, - _who: &Self::AccountId, - _call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> TransactionValidity { - Self::do_validate(info, len) - } - - fn pre_dispatch_unsigned( - _call: &Self::Call, - info: &DispatchInfoOf, + origin: T::RuntimeOrigin, + _call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, - ) -> Result<(), TransactionValidityError> { - Self::do_pre_dispatch(info, len) + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> ValidateResult { + let (validity, next_len) = Self::do_validate(info, len)?; + Ok((validity, next_len, origin)) } - fn validate_unsigned( - _call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> TransactionValidity { - Self::do_validate(info, len) + fn prepare( + self, + val: Self::Val, + _origin: &T::RuntimeOrigin, + _call: &T::RuntimeCall, + info: &DispatchInfoOf, + _len: usize, + _context: &Context, + ) -> Result { + Self::do_prepare(info, val) } fn post_dispatch( - _pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + _pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, _len: usize, _result: &DispatchResult, + _context: &Context, ) -> Result<(), TransactionValidityError> { let unspent = post_info.calc_unspent(info); if unspent.any_gt(Weight::zero()) { @@ -301,6 +301,7 @@ mod tests { AllExtrinsicsLen, BlockWeight, DispatchClass, }; use frame_support::{assert_err, assert_ok, dispatch::Pays, weights::Weight}; + use sp_runtime::traits::DispatchTransaction; use sp_std::marker::PhantomData; fn block_weights() -> crate::limits::BlockWeights { @@ -338,7 +339,8 @@ mod tests { } check(|max, len| { - assert_ok!(CheckWeight::::do_pre_dispatch(max, len)); + let next_len = CheckWeight::::check_block_length(max, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(max, next_len)); assert_eq!(System::block_weight().total(), Weight::MAX); 
assert!(System::block_weight().total().ref_time() > block_weight_limit().ref_time()); }); @@ -419,9 +421,11 @@ mod tests { let len = 0_usize; - assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); + let next_len = CheckWeight::::check_block_length(&max_normal, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&max_normal, next_len)); assert_eq!(System::block_weight().total(), Weight::from_parts(768, 0)); - assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); + let next_len = CheckWeight::::check_block_length(&rest_operational, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&rest_operational, next_len)); assert_eq!(block_weight_limit(), Weight::from_parts(1024, u64::MAX)); assert_eq!(System::block_weight().total(), block_weight_limit().set_proof_size(0)); // Checking single extrinsic should not take current block weight into account. @@ -443,10 +447,12 @@ mod tests { let len = 0_usize; - assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); + let next_len = CheckWeight::::check_block_length(&rest_operational, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&rest_operational, next_len)); // Extra 20 here from block execution + base extrinsic weight assert_eq!(System::block_weight().total(), Weight::from_parts(266, 0)); - assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); + let next_len = CheckWeight::::check_block_length(&max_normal, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&max_normal, next_len)); assert_eq!(block_weight_limit(), Weight::from_parts(1024, u64::MAX)); assert_eq!(System::block_weight().total(), block_weight_limit().set_proof_size(0)); }); @@ -469,16 +475,19 @@ mod tests { }; let len = 0_usize; + let next_len = CheckWeight::::check_block_length(&dispatch_normal, len).unwrap(); assert_err!( - CheckWeight::::do_pre_dispatch(&dispatch_normal, len), + CheckWeight::::do_prepare(&dispatch_normal, next_len), InvalidTransaction::ExhaustsResources ); + let next_len = + CheckWeight::::check_block_length(&dispatch_operational, len).unwrap(); // Thank goodness we can still do an operational transaction to possibly save the // blockchain. - assert_ok!(CheckWeight::::do_pre_dispatch(&dispatch_operational, len)); + assert_ok!(CheckWeight::::do_prepare(&dispatch_operational, next_len)); // Not too much though assert_err!( - CheckWeight::::do_pre_dispatch(&dispatch_operational, len), + CheckWeight::::do_prepare(&dispatch_operational, next_len), InvalidTransaction::ExhaustsResources ); // Even with full block, validity of single transaction should be correct. @@ -503,21 +512,35 @@ mod tests { current_weight.set(normal_limit, DispatchClass::Normal) }); // will not fit. - assert_err!( - CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len), - InvalidTransaction::ExhaustsResources + assert_eq!( + CheckWeight::(PhantomData) + .validate_and_prepare(Some(1).into(), CALL, &normal, len) + .unwrap_err(), + InvalidTransaction::ExhaustsResources.into() ); // will fit. - assert_ok!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &op, len)); + assert_ok!(CheckWeight::(PhantomData).validate_and_prepare( + Some(1).into(), + CALL, + &op, + len + )); // likewise for length limit. 
let len = 100_usize; AllExtrinsicsLen::::put(normal_length_limit()); - assert_err!( - CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len), - InvalidTransaction::ExhaustsResources + assert_eq!( + CheckWeight::(PhantomData) + .validate_and_prepare(Some(1).into(), CALL, &normal, len) + .unwrap_err(), + InvalidTransaction::ExhaustsResources.into() ); - assert_ok!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &op, len)); + assert_ok!(CheckWeight::(PhantomData).validate_and_prepare( + Some(1).into(), + CALL, + &op, + len + )); }) } @@ -528,7 +551,12 @@ mod tests { let normal_limit = normal_weight_limit().ref_time() as usize; let reset_check_weight = |tx, s, f| { AllExtrinsicsLen::::put(0); - let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, tx, s); + let r = CheckWeight::(PhantomData).validate_and_prepare( + Some(1).into(), + CALL, + tx, + s, + ); if f { assert!(r.is_err()) } else { @@ -571,7 +599,12 @@ mod tests { BlockWeight::::mutate(|current_weight| { current_weight.set(s, DispatchClass::Normal) }); - let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, i, len); + let r = CheckWeight::(PhantomData).validate_and_prepare( + Some(1).into(), + CALL, + i, + len, + ); if f { assert!(r.is_err()) } else { @@ -604,18 +637,22 @@ mod tests { .set(Weight::from_parts(256, 0) - base_extrinsic, DispatchClass::Normal); }); - let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); + let pre = CheckWeight::(PhantomData) + .validate_and_prepare(Some(1).into(), CALL, &info, len) + .unwrap() + .0; assert_eq!( BlockWeight::::get().total(), info.weight + Weight::from_parts(256, 0) ); assert_ok!(CheckWeight::::post_dispatch( - Some(pre), + pre, &info, &post_info, len, - &Ok(()) + &Ok(()), + &() )); assert_eq!( BlockWeight::::get().total(), @@ -639,7 +676,10 @@ mod tests { current_weight.set(Weight::from_parts(128, 0), DispatchClass::Normal); }); - let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); + let pre = CheckWeight::(PhantomData) + .validate_and_prepare(Some(1).into(), CALL, &info, len) + .unwrap() + .0; assert_eq!( BlockWeight::::get().total(), info.weight + @@ -648,11 +688,12 @@ mod tests { ); assert_ok!(CheckWeight::::post_dispatch( - Some(pre), + pre, &info, &post_info, len, - &Ok(()) + &Ok(()), + &() )); assert_eq!( BlockWeight::::get().total(), @@ -672,7 +713,12 @@ mod tests { // Initial weight from `weights.base_block` assert_eq!(System::block_weight().total(), weights.base_block); - assert_ok!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &free, len)); + assert_ok!(CheckWeight::(PhantomData).validate_and_prepare( + Some(1).into(), + CALL, + &free, + len + )); assert_eq!( System::block_weight().total(), weights.get(DispatchClass::Normal).base_extrinsic + weights.base_block @@ -696,9 +742,11 @@ mod tests { let len = 0_usize; - assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); + let next_len = CheckWeight::::check_block_length(&max_normal, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&max_normal, next_len)); assert_eq!(System::block_weight().total(), Weight::from_parts(768, 0)); - assert_ok!(CheckWeight::::do_pre_dispatch(&mandatory, len)); + let next_len = CheckWeight::::check_block_length(&mandatory, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&mandatory, next_len)); assert_eq!(block_weight_limit(), Weight::from_parts(1024, u64::MAX)); assert_eq!(System::block_weight().total(), Weight::from_parts(1024 + 768, 0)); assert_eq!(CheckWeight::::check_extrinsic_weight(&mandatory), Ok(())); 
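Across `CheckWeight` here (and `CheckNonce` above), the old single `pre_dispatch` step is split in two: `validate` does the stateless checks and hands a `Val` (the next block length for `CheckWeight`, the loaded account for `CheckNonce`) to `prepare`, which performs the state writes. A rough sketch of the resulting block-weight bookkeeping, using `sp_weights` and made-up figures:

    use sp_weights::Weight;

    fn main() {
        // Illustrative numbers; the real values come from the runtime's `BlockWeights`.
        let base_extrinsic = Weight::from_parts(100, 0);
        let already_in_block = Weight::from_parts(200, 0);
        let actual_weight = Weight::from_parts(250, 0);

        // `prepare` books `base_extrinsic` plus the pre-dispatch weight; `post_dispatch`
        // refunds the unspent part, so the block ends up charged `base_extrinsic` plus
        // the actual weight (as asserted in the `check_weight` benchmark earlier).
        let total = already_in_block
            .saturating_add(base_extrinsic)
            .saturating_add(actual_weight);
        assert_eq!(total, Weight::from_parts(550, 0));
    }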
diff --git a/substrate/frame/system/src/extensions/mod.rs b/substrate/frame/system/src/extensions/mod.rs index a88c9fbf96ebdac86d9c45a8e5b07020c3325bb0..d79104d224035450f9ca69ba7d4b502b9ff0289d 100644 --- a/substrate/frame/system/src/extensions/mod.rs +++ b/substrate/frame/system/src/extensions/mod.rs @@ -22,3 +22,6 @@ pub mod check_nonce; pub mod check_spec_version; pub mod check_tx_version; pub mod check_weight; +pub mod weights; + +pub use weights::WeightInfo; diff --git a/substrate/frame/system/src/extensions/weights.rs b/substrate/frame/system/src/extensions/weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..0d2fcb15ee66e927bb21c159f5b5b7a3dcbb22f5 --- /dev/null +++ b/substrate/frame/system/src/extensions/weights.rs @@ -0,0 +1,196 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// ./target/production/substrate-node +// benchmark +// pallet +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./substrate/frame/system/src/extensions/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `frame_system_extensions`. +pub trait WeightInfo { + fn check_genesis() -> Weight; + fn check_mortality() -> Weight; + fn check_non_zero_sender() -> Weight; + fn check_nonce() -> Weight; + fn check_spec_version() -> Weight; + fn check_tx_version() -> Weight; + fn check_weight() -> Weight; +} + +/// Weights for `frame_system_extensions` using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_876_000 picoseconds. 
+ Weight::from_parts(4_160_000, 3509) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 6_296_000 picoseconds. + Weight::from_parts(6_523_000, 3509) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 449_000 picoseconds. + Weight::from_parts(527_000, 0) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `3593` + // Minimum execution time: 5_689_000 picoseconds. + Weight::from_parts(6_000_000, 3593) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 399_000 picoseconds. + Weight::from_parts(461_000, 0) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 390_000 picoseconds. + Weight::from_parts(439_000, 0) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1489` + // Minimum execution time: 4_375_000 picoseconds. + Weight::from_parts(4_747_000, 1489) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_876_000 picoseconds. + Weight::from_parts(4_160_000, 3509) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 6_296_000 picoseconds. + Weight::from_parts(6_523_000, 3509) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 449_000 picoseconds. + Weight::from_parts(527_000, 0) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `3593` + // Minimum execution time: 5_689_000 picoseconds. 
+ Weight::from_parts(6_000_000, 3593) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 399_000 picoseconds. + Weight::from_parts(461_000, 0) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 390_000 picoseconds. + Weight::from_parts(439_000, 0) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1489` + // Minimum execution time: 4_375_000 picoseconds. + Weight::from_parts(4_747_000, 1489) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } +} diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 1405c7303f870e1c5f49d6766d4a8fd39412a5e9..0318f77342e00380bc6b663e3973e01eb629efa7 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -130,11 +130,13 @@ use frame_support::{ DispatchResult, DispatchResultWithPostInfo, PerDispatchClass, PostDispatchInfo, }, ensure, impl_ensure_origin_with_arg_ignoring_arg, + migrations::MultiStepMigrator, pallet_prelude::Pays, storage::{self, StorageStreamIter}, traits::{ ConstU32, Contains, EnsureOrigin, EnsureOriginWithArg, Get, HandleLifetime, - OnKilledAccount, OnNewAccount, OriginTrait, PalletInfo, SortedMembers, StoredMap, TypedGet, + OnKilledAccount, OnNewAccount, OnRuntimeUpgrade, OriginTrait, PalletInfo, SortedMembers, + StoredMap, TypedGet, }, Parameter, }; @@ -164,11 +166,12 @@ pub use extensions::{ check_genesis::CheckGenesis, check_mortality::CheckMortality, check_non_zero_sender::CheckNonZeroSender, check_nonce::CheckNonce, check_spec_version::CheckSpecVersion, check_tx_version::CheckTxVersion, - check_weight::CheckWeight, + check_weight::CheckWeight, WeightInfo as ExtensionsWeightInfo, }; // Backward compatible re-export. pub use extensions::check_mortality::CheckMortality as CheckEra; pub use frame_support::dispatch::RawOrigin; +use frame_support::traits::{PostInherents, PostTransactions, PreInherents}; pub use weights::WeightInfo; const LOG_TARGET: &str = "runtime::system"; @@ -281,6 +284,7 @@ pub mod pallet { type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type ExtensionsWeightInfo = (); type SS58Prefix = (); type Version = (); type BlockWeights = (); @@ -299,9 +303,14 @@ pub mod pallet { type BaseCallFilter = frame_support::traits::Everything; type BlockHashCount = frame_support::traits::ConstU64<10>; type OnSetCode = (); + type SingleBlockMigrations = (); + type MultiBlockMigrator = (); + type PreInherents = (); + type PostInherents = (); + type PostTransactions = (); } - /// Default configurations of this pallet in a solo-chain environment. + /// Default configurations of this pallet in a solochain environment. /// /// ## Considerations: /// @@ -348,6 +357,9 @@ pub mod pallet { /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = (); + /// Weight information for the extensions of this pallet. + type ExtensionsWeightInfo = (); + /// This is used as an identifier of the chain. 
type SS58Prefix = (); @@ -393,6 +405,11 @@ pub mod pallet { /// The set code logic, just the default since we're not a parachain. type OnSetCode = (); + type SingleBlockMigrations = (); + type MultiBlockMigrator = (); + type PreInherents = (); + type PostInherents = (); + type PostTransactions = (); } /// Default configurations of this pallet in a relay-chain environment. @@ -452,6 +469,7 @@ pub mod pallet { + Clone + OriginTrait; + #[docify::export(system_runtime_call)] /// The aggregated `RuntimeCall` type. #[pallet::no_default_bounds] type RuntimeCall: Parameter @@ -524,7 +542,7 @@ pub mod pallet { #[pallet::constant] type DbWeight: Get; - /// Get the chain's current version. + /// Get the chain's in-code version. #[pallet::constant] type Version: Get; @@ -549,8 +567,12 @@ pub mod pallet { /// All resources should be cleaned up associated with the given account. type OnKilledAccount: OnKilledAccount; + /// Weight information for the extrinsics of this pallet. type SystemWeightInfo: WeightInfo; + /// Weight information for the transaction extensions of this pallet. + type ExtensionsWeightInfo: extensions::WeightInfo; + /// The designated SS58 prefix of this chain. /// /// This replaces the "ss58Format" property declared in the chain spec. Reason is @@ -571,6 +593,35 @@ pub mod pallet { /// The maximum number of consumers allowed on a single account. type MaxConsumers: ConsumerLimits; + + /// All migrations that should run in the next runtime upgrade. + /// + /// These used to be formerly configured in `Executive`. Parachains need to ensure that + /// running all these migrations in one block will not overflow the weight limit of a block. + /// The migrations are run *before* the pallet `on_runtime_upgrade` hooks, just like the + /// `OnRuntimeUpgrade` migrations. + type SingleBlockMigrations: OnRuntimeUpgrade; + + /// The migrator that is used to run Multi-Block-Migrations. + /// + /// Can be set to [`pallet-migrations`] or an alternative implementation of the interface. + /// The diagram in `frame_executive::block_flowchart` explains when it runs. + type MultiBlockMigrator: MultiStepMigrator; + + /// A callback that executes in *every block* directly before all inherents were applied. + /// + /// See `frame_executive::block_flowchart` for a in-depth explanation when it runs. + type PreInherents: PreInherents; + + /// A callback that executes in *every block* directly after all inherents were applied. + /// + /// See `frame_executive::block_flowchart` for a in-depth explanation when it runs. + type PostInherents: PostInherents; + + /// A callback that executes in *every block* directly after all transactions were applied. + /// + /// See `frame_executive::block_flowchart` for a in-depth explanation when it runs. + type PostTransactions: PostTransactions; } #[pallet::pallet] @@ -618,6 +669,9 @@ pub mod pallet { } /// Set the new runtime code without doing any checks of the given `code`. + /// + /// Note that runtime upgrades will not run if this is called with a not-increasing spec + /// version! #[pallet::call_index(3)] #[pallet::weight((T::SystemWeightInfo::set_code(), DispatchClass::Operational))] pub fn set_code_without_checks( @@ -813,6 +867,8 @@ pub mod pallet { NonZeroRefCount, /// The origin filter prevent the call to be dispatched. CallFiltered, + /// A multi-block migration is ongoing and prevents the current code from being replaced. + MultiBlockMigrationsOngoing, #[cfg(feature = "experimental")] /// The specified [`Task`] is not valid. 
InvalidTask, @@ -844,6 +900,10 @@ pub mod pallet { #[pallet::storage] pub(super) type ExtrinsicCount = StorageValue<_, u32>; + /// Whether all inherents have been applied. + #[pallet::storage] + pub type InherentsApplied = StorageValue<_, bool, ValueQuery>; + /// The current weight for the block. #[pallet::storage] #[pallet::whitelist_storage] @@ -893,6 +953,7 @@ pub mod pallet { /// just in case someone still reads them from within the runtime. #[pallet::storage] #[pallet::whitelist_storage] + #[pallet::disable_try_decode_storage] #[pallet::unbounded] pub(super) type Events = StorageValue<_, Vec>>, ValueQuery>; @@ -1371,6 +1432,19 @@ impl Pallet { Self::deposit_event(Event::CodeUpdated); } + /// Whether all inherents have been applied. + pub fn inherents_applied() -> bool { + InherentsApplied::::get() + } + + /// Note that all inherents have been applied. + /// + /// Should be called immediately after all inherents have been applied. Must be called at least + /// once per block. + pub fn note_inherents_applied() { + InherentsApplied::::put(true); + } + /// Increment the reference counter on an account. #[deprecated = "Use `inc_consumers` instead"] pub fn inc_ref(who: &T::AccountId) { @@ -1690,6 +1764,7 @@ impl Pallet { >::put(digest); >::put(parent_hash); >::insert(*number - One::one(), parent_hash); + >::kill(); // Remove previous block data from storage BlockWeight::::kill(); @@ -1736,6 +1811,7 @@ impl Pallet { ExecutionPhase::::kill(); AllExtrinsicsLen::::kill(); storage::unhashed::kill(well_known_keys::INTRABLOCK_ENTROPY); + InherentsApplied::::kill(); // The following fields // @@ -1979,10 +2055,14 @@ impl Pallet { /// Determine whether or not it is possible to update the code. /// - /// Checks the given code if it is a valid runtime wasm blob by instantianting + /// Checks the given code if it is a valid runtime wasm blob by instantiating /// it and extracting the runtime version of it. It checks that the runtime version /// of the old and new runtime has the same spec name and that the spec version is increasing. pub fn can_set_code(code: &[u8]) -> Result<(), sp_runtime::DispatchError> { + if T::MultiBlockMigrator::ongoing() { + return Err(Error::::MultiBlockMigrationsOngoing.into()) + } + let current_version = T::Version::get(); let new_version = sp_io::misc::runtime_version(code) .and_then(|v| RuntimeVersion::decode(&mut &v[..]).ok()) diff --git a/substrate/frame/system/src/mock.rs b/substrate/frame/system/src/mock.rs index c4108099e39ffdf306d7a9e5e0af8a619336eaea..7059845a7df33104a66348803fee6396bf2f93d2 100644 --- a/substrate/frame/system/src/mock.rs +++ b/substrate/frame/system/src/mock.rs @@ -86,6 +86,22 @@ impl Config for Test { type Version = Version; type AccountData = u32; type OnKilledAccount = RecordKilled; + type MultiBlockMigrator = MockedMigrator; +} + +parameter_types! 
{ + pub static Ongoing: bool = false; +} + +pub struct MockedMigrator; +impl frame_support::migrations::MultiStepMigrator for MockedMigrator { + fn ongoing() -> bool { + Ongoing::get() + } + + fn step() -> Weight { + Weight::zero() + } } pub type SysEvent = frame_system::Event; diff --git a/substrate/frame/system/src/offchain.rs b/substrate/frame/system/src/offchain.rs index a019cfd666e8cb3ddccc02aa706692cbe6cee2e3..ee73e33fe31302ea1fb864b7e88164f3feb02e39 100644 --- a/substrate/frame/system/src/offchain.rs +++ b/substrate/frame/system/src/offchain.rs @@ -79,6 +79,9 @@ pub struct SubmitTransaction, Overarchi _phantom: sp_std::marker::PhantomData<(T, OverarchingCall)>, } +// TODO [#2415]: Avoid splitting call and the totally opaque `signature`; `CreateTransaction` trait +// should provide something which impls `Encode`, which can be sent onwards to +// `sp_io::offchain::submit_transaction`. There's no great need to split things up as in here. impl SubmitTransaction where T: SendTransactionTypes, @@ -88,6 +91,8 @@ where call: >::OverarchingCall, signature: Option<::SignaturePayload>, ) -> Result<(), ()> { + // TODO: Use regular transaction API instead. + #[allow(deprecated)] let xt = T::Extrinsic::new(call, signature).ok_or(())?; sp_io::offchain::submit_transaction(xt.encode()) } @@ -471,7 +476,7 @@ pub trait SendTransactionTypes { /// /// This trait is meant to be implemented by the runtime and is responsible for constructing /// a payload to be signed and contained within the extrinsic. -/// This will most likely include creation of `SignedExtra` (a set of `SignedExtensions`). +/// This will most likely include creation of `TxExtension` (a tuple of `TransactionExtension`s). /// Note that the result can be altered by inspecting the `Call` (for instance adjusting /// fees, or mortality depending on the `pallet` being called). 
pub trait CreateSignedTransaction: @@ -621,14 +626,17 @@ mod tests { use crate::mock::{RuntimeCall, Test as TestRuntime, CALL}; use codec::Decode; use sp_core::offchain::{testing, TransactionPoolExt}; - use sp_runtime::testing::{TestSignature, TestXt, UintAuthorityId}; + use sp_runtime::{ + generic::UncheckedExtrinsic, + testing::{TestSignature, UintAuthorityId}, + }; impl SigningTypes for TestRuntime { type Public = UintAuthorityId; type Signature = TestSignature; } - type Extrinsic = TestXt; + type Extrinsic = UncheckedExtrinsic; impl SendTransactionTypes for TestRuntime { type Extrinsic = Extrinsic; @@ -693,7 +701,7 @@ mod tests { let _tx3 = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx1 = Extrinsic::decode(&mut &*tx1).unwrap(); - assert_eq!(tx1.signature, None); + assert!(tx1.is_inherent()); }); } @@ -724,7 +732,7 @@ mod tests { let tx1 = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx1 = Extrinsic::decode(&mut &*tx1).unwrap(); - assert_eq!(tx1.signature, None); + assert!(tx1.is_inherent()); }); } @@ -758,7 +766,7 @@ mod tests { let _tx2 = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx1 = Extrinsic::decode(&mut &*tx1).unwrap(); - assert_eq!(tx1.signature, None); + assert!(tx1.is_inherent()); }); } @@ -790,7 +798,7 @@ mod tests { let tx1 = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx1 = Extrinsic::decode(&mut &*tx1).unwrap(); - assert_eq!(tx1.signature, None); + assert!(tx1.is_inherent()); }); } } diff --git a/substrate/frame/system/src/tests.rs b/substrate/frame/system/src/tests.rs index e437e7f9f39b0845d9f7eef33e3c90546c8dbbd2..b889b5ca046efe557e7706870c11febce8a358b2 100644 --- a/substrate/frame/system/src/tests.rs +++ b/substrate/frame/system/src/tests.rs @@ -675,6 +675,28 @@ fn set_code_with_real_wasm_blob() { }); } +#[test] +fn set_code_rejects_during_mbm() { + Ongoing::set(true); + + let executor = substrate_test_runtime_client::new_native_or_wasm_executor(); + let mut ext = new_test_ext(); + ext.register_extension(sp_core::traits::ReadRuntimeVersionExt::new(executor)); + ext.execute_with(|| { + System::set_block_number(1); + let res = System::set_code( + RawOrigin::Root.into(), + substrate_test_runtime_client::runtime::wasm_binary_unwrap().to_vec(), + ); + assert_eq!( + res, + Err(DispatchErrorWithPostInfo::from(Error::::MultiBlockMigrationsOngoing)) + ); + + assert!(System::events().is_empty()); + }); +} + #[test] fn set_code_via_authorization_works() { let executor = substrate_test_runtime_client::new_native_or_wasm_executor(); diff --git a/substrate/frame/system/src/weights.rs b/substrate/frame/system/src/weights.rs index 41807dea1c55f9cd246db231fc9102d4b9eb5c4f..bd64bbf37639f4b257b0e2d3b75e812131b16e72 100644 --- a/substrate/frame/system/src/weights.rs +++ b/substrate/frame/system/src/weights.rs @@ -15,30 +15,31 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for frame_system +//! Autogenerated weights for `frame_system` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! 
DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-s7kdgajz-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=frame_system +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=frame-system -// --chain=dev -// --header=./HEADER-APACHE2 -// --output=./frame/system/src/weights.rs -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/system/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -48,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for frame_system. +/// Weight functions needed for `frame_system`. pub trait WeightInfo { fn remark(b: u32, ) -> Weight; fn remark_with_event(b: u32, ) -> Weight; @@ -61,7 +62,7 @@ pub trait WeightInfo { fn apply_authorized_upgrade() -> Weight; } -/// Weights for frame_system using the Substrate node and recommended hardware. +/// Weights for `frame_system` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[0, 3932160]`. @@ -69,84 +70,86 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_004_000 picoseconds. - Weight::from_parts(2_119_000, 0) + // Minimum execution time: 2_130_000 picoseconds. + Weight::from_parts(2_976_430, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(390, 0).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(386, 0).saturating_mul(b.into())) } /// The range of component `b` is `[0, 3932160]`. fn remark_with_event(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_032_000 picoseconds. - Weight::from_parts(8_097_000, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(1_455, 0).saturating_mul(b.into())) + // Minimum execution time: 5_690_000 picoseconds. 
+ Weight::from_parts(15_071_416, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_387, 0).saturating_mul(b.into())) } - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a686561707061676573` (r:0 w:1) - /// Proof Skipped: unknown `0x3a686561707061676573` (r:0 w:1) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) fn set_heap_pages() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 4_446_000 picoseconds. - Weight::from_parts(4_782_000, 1485) + // Minimum execution time: 3_822_000 picoseconds. + Weight::from_parts(4_099_000, 1485) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a636f6465` (r:0 w:1) - /// Proof Skipped: unknown `0x3a636f6465` (r:0 w:1) + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) fn set_code() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1485` - // Minimum execution time: 84_000_503_000 picoseconds. - Weight::from_parts(87_586_619_000, 1485) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Measured: `142` + // Estimated: `67035` + // Minimum execution time: 81_512_045_000 picoseconds. + Weight::from_parts(82_321_281_000, 67035) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1000]`. fn set_storage(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_086_000 picoseconds. - Weight::from_parts(2_175_000, 0) - // Standard Error: 1_056 - .saturating_add(Weight::from_parts(841_511, 0).saturating_mul(i.into())) + // Minimum execution time: 2_074_000 picoseconds. + Weight::from_parts(2_137_000, 0) + // Standard Error: 879 + .saturating_add(Weight::from_parts(797_224, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1000]`. fn kill_storage(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_000_000 picoseconds. 
- Weight::from_parts(2_255_000, 0) - // Standard Error: 1_425 - .saturating_add(Weight::from_parts(662_473, 0).saturating_mul(i.into())) + // Minimum execution time: 2_122_000 picoseconds. + Weight::from_parts(2_208_000, 0) + // Standard Error: 855 + .saturating_add(Weight::from_parts(594_034, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `p` is `[0, 1000]`. fn kill_prefix(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `115 + p * (69 ±0)` - // Estimated: `128 + p * (70 ±0)` - // Minimum execution time: 4_189_000 picoseconds. - Weight::from_parts(4_270_000, 128) - // Standard Error: 2_296 - .saturating_add(Weight::from_parts(1_389_650, 0).saturating_mul(p.into())) + // Measured: `129 + p * (69 ±0)` + // Estimated: `135 + p * (70 ±0)` + // Minimum execution time: 3_992_000 picoseconds. + Weight::from_parts(4_170_000, 135) + // Standard Error: 1_377 + .saturating_add(Weight::from_parts(1_267_892, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) @@ -157,114 +160,116 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 33_027_000 picoseconds. - Weight::from_parts(33_027_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) + // Minimum execution time: 8_872_000 picoseconds. + Weight::from_parts(9_513_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `System::AuthorizedUpgrade` (r:1 w:1) /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) /// Storage: `System::Digest` (r:1 w:1) /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) fn apply_authorized_upgrade() -> Weight { // Proof Size summary in bytes: - // Measured: `22` - // Estimated: `1518` - // Minimum execution time: 118_101_992_000 picoseconds. - Weight::from_parts(118_101_992_000, 0) - .saturating_add(Weight::from_parts(0, 1518)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(3)) + // Measured: `164` + // Estimated: `67035` + // Minimum execution time: 85_037_546_000 picoseconds. + Weight::from_parts(85_819_414_000, 67035) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { /// The range of component `b` is `[0, 3932160]`. fn remark(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_004_000 picoseconds. 
- Weight::from_parts(2_119_000, 0) + // Minimum execution time: 2_130_000 picoseconds. + Weight::from_parts(2_976_430, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(390, 0).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(386, 0).saturating_mul(b.into())) } /// The range of component `b` is `[0, 3932160]`. fn remark_with_event(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_032_000 picoseconds. - Weight::from_parts(8_097_000, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(1_455, 0).saturating_mul(b.into())) + // Minimum execution time: 5_690_000 picoseconds. + Weight::from_parts(15_071_416, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_387, 0).saturating_mul(b.into())) } - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a686561707061676573` (r:0 w:1) - /// Proof Skipped: unknown `0x3a686561707061676573` (r:0 w:1) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) fn set_heap_pages() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 4_446_000 picoseconds. - Weight::from_parts(4_782_000, 1485) + // Minimum execution time: 3_822_000 picoseconds. + Weight::from_parts(4_099_000, 1485) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a636f6465` (r:0 w:1) - /// Proof Skipped: unknown `0x3a636f6465` (r:0 w:1) + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) fn set_code() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1485` - // Minimum execution time: 84_000_503_000 picoseconds. - Weight::from_parts(87_586_619_000, 1485) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Measured: `142` + // Estimated: `67035` + // Minimum execution time: 81_512_045_000 picoseconds. + Weight::from_parts(82_321_281_000, 67035) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1000]`. fn set_storage(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_086_000 picoseconds. - Weight::from_parts(2_175_000, 0) - // Standard Error: 1_056 - .saturating_add(Weight::from_parts(841_511, 0).saturating_mul(i.into())) + // Minimum execution time: 2_074_000 picoseconds. 
+ Weight::from_parts(2_137_000, 0) + // Standard Error: 879 + .saturating_add(Weight::from_parts(797_224, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(i.into()))) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1000]`. fn kill_storage(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_000_000 picoseconds. - Weight::from_parts(2_255_000, 0) - // Standard Error: 1_425 - .saturating_add(Weight::from_parts(662_473, 0).saturating_mul(i.into())) + // Minimum execution time: 2_122_000 picoseconds. + Weight::from_parts(2_208_000, 0) + // Standard Error: 855 + .saturating_add(Weight::from_parts(594_034, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(i.into()))) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `p` is `[0, 1000]`. fn kill_prefix(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `115 + p * (69 ±0)` - // Estimated: `128 + p * (70 ±0)` - // Minimum execution time: 4_189_000 picoseconds. - Weight::from_parts(4_270_000, 128) - // Standard Error: 2_296 - .saturating_add(Weight::from_parts(1_389_650, 0).saturating_mul(p.into())) + // Measured: `129 + p * (69 ±0)` + // Estimated: `135 + p * (70 ±0)` + // Minimum execution time: 3_992_000 picoseconds. + Weight::from_parts(4_170_000, 135) + // Standard Error: 1_377 + .saturating_add(Weight::from_parts(1_267_892, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) @@ -275,25 +280,25 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 33_027_000 picoseconds. - Weight::from_parts(33_027_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(RocksDbWeight::get().writes(1)) + // Minimum execution time: 8_872_000 picoseconds. + Weight::from_parts(9_513_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `System::AuthorizedUpgrade` (r:1 w:1) /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) /// Storage: `System::Digest` (r:1 w:1) /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) fn apply_authorized_upgrade() -> Weight { // Proof Size summary in bytes: - // Measured: `22` - // Estimated: `1518` - // Minimum execution time: 118_101_992_000 picoseconds. 
- Weight::from_parts(118_101_992_000, 0) - .saturating_add(Weight::from_parts(0, 1518)) - .saturating_add(RocksDbWeight::get().reads(2)) - .saturating_add(RocksDbWeight::get().writes(3)) + // Measured: `164` + // Estimated: `67035` + // Minimum execution time: 85_037_546_000 picoseconds. + Weight::from_parts(85_819_414_000, 67035) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } } diff --git a/substrate/frame/timestamp/src/weights.rs b/substrate/frame/timestamp/src/weights.rs index 46c544734869442c17d2be8b354bed916e197b83..2df68ba0f1e716ed5c390f7a173ff449c35e88d2 100644 --- a/substrate/frame/timestamp/src/weights.rs +++ b/substrate/frame/timestamp/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_timestamp +//! Autogenerated weights for `pallet_timestamp` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/timestamp/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/timestamp/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,57 +49,57 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_timestamp. +/// Weight functions needed for `pallet_timestamp`. pub trait WeightInfo { fn set() -> Weight; fn on_finalize() -> Weight; } -/// Weights for pallet_timestamp using the Substrate node and recommended hardware. +/// Weights for `pallet_timestamp` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Timestamp Now (r:1 w:1) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Babe CurrentSlot (r:1 w:0) - /// Proof: Babe CurrentSlot (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: `Timestamp::Now` (r:1 w:1) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Babe::CurrentSlot` (r:1 w:0) + /// Proof: `Babe::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn set() -> Weight { // Proof Size summary in bytes: - // Measured: `312` + // Measured: `345` // Estimated: `1493` - // Minimum execution time: 9_857_000 picoseconds. - Weight::from_parts(10_492_000, 1493) + // Minimum execution time: 8_417_000 picoseconds. + Weight::from_parts(8_932_000, 1493) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } fn on_finalize() -> Weight { // Proof Size summary in bytes: - // Measured: `161` + // Measured: `194` // Estimated: `0` - // Minimum execution time: 4_175_000 picoseconds. - Weight::from_parts(4_334_000, 0) + // Minimum execution time: 3_911_000 picoseconds. + Weight::from_parts(4_092_000, 0) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Timestamp Now (r:1 w:1) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Babe CurrentSlot (r:1 w:0) - /// Proof: Babe CurrentSlot (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: `Timestamp::Now` (r:1 w:1) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Babe::CurrentSlot` (r:1 w:0) + /// Proof: `Babe::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn set() -> Weight { // Proof Size summary in bytes: - // Measured: `312` + // Measured: `345` // Estimated: `1493` - // Minimum execution time: 9_857_000 picoseconds. - Weight::from_parts(10_492_000, 1493) + // Minimum execution time: 8_417_000 picoseconds. + Weight::from_parts(8_932_000, 1493) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } fn on_finalize() -> Weight { // Proof Size summary in bytes: - // Measured: `161` + // Measured: `194` // Estimated: `0` - // Minimum execution time: 4_175_000 picoseconds. - Weight::from_parts(4_334_000, 0) + // Minimum execution time: 3_911_000 picoseconds. + Weight::from_parts(4_092_000, 0) } } diff --git a/substrate/frame/tips/src/lib.rs b/substrate/frame/tips/src/lib.rs index 4c7cfc3028a9f04c87022ffc89892b59ce3c5610..8c360fb57d72488553529ab4264eb7f8b444c064 100644 --- a/substrate/frame/tips/src/lib.rs +++ b/substrate/frame/tips/src/lib.rs @@ -122,7 +122,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. 
const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); #[pallet::pallet] diff --git a/substrate/frame/tips/src/weights.rs b/substrate/frame/tips/src/weights.rs index ec6228667159ddbdbfecd12ccb5ff6250390829a..dbe1ed246ff015567e94e2396141d583b2a17a6c 100644 --- a/substrate/frame/tips/src/weights.rs +++ b/substrate/frame/tips/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_tips +//! Autogenerated weights for `pallet_tips` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/tips/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/tips/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_tips. +/// Weight functions needed for `pallet_tips`. pub trait WeightInfo { fn report_awesome(r: u32, ) -> Weight; fn retract_tip() -> Weight; @@ -60,220 +59,220 @@ pub trait WeightInfo { fn slash_tip(t: u32, ) -> Weight; } -/// Weights for pallet_tips using the Substrate node and recommended hardware. +/// Weights for `pallet_tips` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Tips Reasons (r:1 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) + /// Storage: `Tips::Reasons` (r:1 w:1) + /// Proof: `Tips::Reasons` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Tips::Tips` (r:1 w:1) + /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 300]`. fn report_awesome(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3469` - // Minimum execution time: 29_576_000 picoseconds. - Weight::from_parts(30_722_650, 3469) - // Standard Error: 192 - .saturating_add(Weight::from_parts(2_601, 0).saturating_mul(r.into())) + // Minimum execution time: 25_145_000 picoseconds. 
+ Weight::from_parts(26_085_800, 3469) + // Standard Error: 216 + .saturating_add(Weight::from_parts(5_121, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) - /// Storage: Tips Reasons (r:0 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) + /// Storage: `Tips::Tips` (r:1 w:1) + /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Tips::Reasons` (r:0 w:1) + /// Proof: `Tips::Reasons` (`max_values`: None, `max_size`: None, mode: `Measured`) fn retract_tip() -> Weight { // Proof Size summary in bytes: // Measured: `221` // Estimated: `3686` - // Minimum execution time: 28_522_000 picoseconds. - Weight::from_parts(29_323_000, 3686) + // Minimum execution time: 25_302_000 picoseconds. + Weight::from_parts(26_229_000, 3686) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Tips Reasons (r:1 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) - /// Storage: Tips Tips (r:0 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Tips::Reasons` (r:1 w:1) + /// Proof: `Tips::Reasons` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Tips::Tips` (r:0 w:1) + /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 300]`. /// The range of component `t` is `[1, 13]`. fn tip_new(r: u32, t: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `526 + t * (64 ±0)` // Estimated: `3991 + t * (64 ±0)` - // Minimum execution time: 19_650_000 picoseconds. - Weight::from_parts(19_837_982, 3991) - // Standard Error: 151 - .saturating_add(Weight::from_parts(1_746, 0).saturating_mul(r.into())) - // Standard Error: 3_588 - .saturating_add(Weight::from_parts(102_359, 0).saturating_mul(t.into())) + // Minimum execution time: 16_600_000 picoseconds. + Weight::from_parts(17_071_638, 3991) + // Standard Error: 131 + .saturating_add(Weight::from_parts(1_138, 0).saturating_mul(r.into())) + // Standard Error: 3_125 + .saturating_add(Weight::from_parts(82_996, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(t.into())) } - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Tips::Tips` (r:1 w:1) + /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[1, 13]`. 
fn tip(t: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `747 + t * (112 ±0)` // Estimated: `4212 + t * (112 ±0)` - // Minimum execution time: 15_641_000 picoseconds. - Weight::from_parts(15_745_460, 4212) - // Standard Error: 5_106 - .saturating_add(Weight::from_parts(229_475, 0).saturating_mul(t.into())) + // Minimum execution time: 13_972_000 picoseconds. + Weight::from_parts(14_269_356, 4212) + // Standard Error: 4_695 + .saturating_add(Weight::from_parts(205_433, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) } - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Tips Reasons (r:0 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) + /// Storage: `Tips::Tips` (r:1 w:1) + /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Tips::Reasons` (r:0 w:1) + /// Proof: `Tips::Reasons` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[1, 13]`. fn close_tip(t: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `786 + t * (112 ±0)` // Estimated: `4242 + t * (112 ±0)` - // Minimum execution time: 62_059_000 picoseconds. - Weight::from_parts(64_604_554, 4242) - // Standard Error: 11_818 - .saturating_add(Weight::from_parts(116_297, 0).saturating_mul(t.into())) + // Minimum execution time: 51_878_000 picoseconds. + Weight::from_parts(54_513_477, 4242) + // Standard Error: 12_281 + .saturating_add(Weight::from_parts(126_605, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) } - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) - /// Storage: Tips Reasons (r:0 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) + /// Storage: `Tips::Tips` (r:1 w:1) + /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Tips::Reasons` (r:0 w:1) + /// Proof: `Tips::Reasons` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[1, 13]`. fn slash_tip(t: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `269` // Estimated: `3734` - // Minimum execution time: 14_133_000 picoseconds. - Weight::from_parts(14_957_547, 3734) - // Standard Error: 2_765 - .saturating_add(Weight::from_parts(22_138, 0).saturating_mul(t.into())) + // Minimum execution time: 11_800_000 picoseconds. 
+ Weight::from_parts(12_520_984, 3734) + // Standard Error: 2_360 + .saturating_add(Weight::from_parts(28_777, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Tips Reasons (r:1 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) + /// Storage: `Tips::Reasons` (r:1 w:1) + /// Proof: `Tips::Reasons` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Tips::Tips` (r:1 w:1) + /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 300]`. fn report_awesome(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3469` - // Minimum execution time: 29_576_000 picoseconds. - Weight::from_parts(30_722_650, 3469) - // Standard Error: 192 - .saturating_add(Weight::from_parts(2_601, 0).saturating_mul(r.into())) + // Minimum execution time: 25_145_000 picoseconds. + Weight::from_parts(26_085_800, 3469) + // Standard Error: 216 + .saturating_add(Weight::from_parts(5_121, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) - /// Storage: Tips Reasons (r:0 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) + /// Storage: `Tips::Tips` (r:1 w:1) + /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Tips::Reasons` (r:0 w:1) + /// Proof: `Tips::Reasons` (`max_values`: None, `max_size`: None, mode: `Measured`) fn retract_tip() -> Weight { // Proof Size summary in bytes: // Measured: `221` // Estimated: `3686` - // Minimum execution time: 28_522_000 picoseconds. - Weight::from_parts(29_323_000, 3686) + // Minimum execution time: 25_302_000 picoseconds. + Weight::from_parts(26_229_000, 3686) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Tips Reasons (r:1 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) - /// Storage: Tips Tips (r:0 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Tips::Reasons` (r:1 w:1) + /// Proof: `Tips::Reasons` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Tips::Tips` (r:0 w:1) + /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 300]`. /// The range of component `t` is `[1, 13]`. fn tip_new(r: u32, t: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `526 + t * (64 ±0)` // Estimated: `3991 + t * (64 ±0)` - // Minimum execution time: 19_650_000 picoseconds. 
- Weight::from_parts(19_837_982, 3991) - // Standard Error: 151 - .saturating_add(Weight::from_parts(1_746, 0).saturating_mul(r.into())) - // Standard Error: 3_588 - .saturating_add(Weight::from_parts(102_359, 0).saturating_mul(t.into())) + // Minimum execution time: 16_600_000 picoseconds. + Weight::from_parts(17_071_638, 3991) + // Standard Error: 131 + .saturating_add(Weight::from_parts(1_138, 0).saturating_mul(r.into())) + // Standard Error: 3_125 + .saturating_add(Weight::from_parts(82_996, 0).saturating_mul(t.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(t.into())) } - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Tips::Tips` (r:1 w:1) + /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[1, 13]`. fn tip(t: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `747 + t * (112 ±0)` // Estimated: `4212 + t * (112 ±0)` - // Minimum execution time: 15_641_000 picoseconds. - Weight::from_parts(15_745_460, 4212) - // Standard Error: 5_106 - .saturating_add(Weight::from_parts(229_475, 0).saturating_mul(t.into())) + // Minimum execution time: 13_972_000 picoseconds. + Weight::from_parts(14_269_356, 4212) + // Standard Error: 4_695 + .saturating_add(Weight::from_parts(205_433, 0).saturating_mul(t.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) } - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Tips Reasons (r:0 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) + /// Storage: `Tips::Tips` (r:1 w:1) + /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Tips::Reasons` (r:0 w:1) + /// Proof: `Tips::Reasons` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[1, 13]`. fn close_tip(t: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `786 + t * (112 ±0)` // Estimated: `4242 + t * (112 ±0)` - // Minimum execution time: 62_059_000 picoseconds. - Weight::from_parts(64_604_554, 4242) - // Standard Error: 11_818 - .saturating_add(Weight::from_parts(116_297, 0).saturating_mul(t.into())) + // Minimum execution time: 51_878_000 picoseconds. 
+ Weight::from_parts(54_513_477, 4242) + // Standard Error: 12_281 + .saturating_add(Weight::from_parts(126_605, 0).saturating_mul(t.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) } - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) - /// Storage: Tips Reasons (r:0 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) + /// Storage: `Tips::Tips` (r:1 w:1) + /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Tips::Reasons` (r:0 w:1) + /// Proof: `Tips::Reasons` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[1, 13]`. fn slash_tip(t: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `269` // Estimated: `3734` - // Minimum execution time: 14_133_000 picoseconds. - Weight::from_parts(14_957_547, 3734) - // Standard Error: 2_765 - .saturating_add(Weight::from_parts(22_138, 0).saturating_mul(t.into())) + // Minimum execution time: 11_800_000 picoseconds. + Weight::from_parts(12_520_984, 3734) + // Standard Error: 2_360 + .saturating_add(Weight::from_parts(28_777, 0).saturating_mul(t.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/substrate/frame/transaction-payment/Cargo.toml b/substrate/frame/transaction-payment/Cargo.toml index 275e1c01f927217726907f3e6045fc0b0d3681e5..66e0bef1436f1a197a5d450680c88a72ddd885e0 100644 --- a/substrate/frame/transaction-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/Cargo.toml @@ -21,6 +21,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-core = { path = "../../primitives/core", default-features = false } @@ -36,6 +37,7 @@ pallet-balances = { path = "../balances" } default = ["std"] std = [ "codec/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "pallet-balances/std", @@ -46,6 +48,13 @@ std = [ "sp-runtime/std", "sp-std/std", ] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml index 7640cc815e56c7fcecbb7d09c61c81635ca22491..528f29e8e4ae7738ea9d72ed8dcd54a9c6f659b0 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml @@ -19,6 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] # Substrate dependencies sp-runtime = { path = "../../../primitives/runtime", default-features = false } sp-std = { path = "../../../primitives/std", default-features = false } +frame-benchmarking = { path = 
"../../benchmarking", default-features = false, optional = true } frame-support = { path = "../../support", default-features = false } frame-system = { path = "../../system", default-features = false } pallet-asset-conversion = { path = "../../asset-conversion", default-features = false } @@ -37,6 +38,7 @@ pallet-balances = { path = "../../balances" } default = ["std"] std = [ "codec/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "pallet-asset-conversion/std", @@ -50,6 +52,16 @@ std = [ "sp-std/std", "sp-storage/std", ] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-asset-conversion/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/README.md b/substrate/frame/transaction-payment/asset-conversion-tx-payment/README.md index eccba773673e690a6d415a4c61d267ba75a1c12e..fcd1527526e98ecf45c7979bb40a2d59aad93452 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/README.md +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/README.md @@ -16,6 +16,6 @@ asset. ### Integration This pallet wraps FRAME's transaction payment pallet and functions as a replacement. This means you should include both pallets in your `construct_runtime` macro, but only include this -pallet's [`SignedExtension`] ([`ChargeAssetTxPayment`]). +pallet's [`TransactionExtension`] ([`ChargeAssetTxPayment`]). License: Apache-2.0 diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/benchmarking.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..0bffb991e4a3b53eac2c385f6e901eeef0c6800c --- /dev/null +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/benchmarking.rs @@ -0,0 +1,125 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Benchmarks for Asset Conversion Tx Payment Pallet's transaction extension + +use super::*; +use crate::Pallet; +use frame_benchmarking::v2::*; +use frame_support::{ + dispatch::{DispatchInfo, PostDispatchInfo}, + pallet_prelude::*, +}; +use frame_system::RawOrigin; +use sp_runtime::traits::{AsSystemOriginSigner, DispatchTransaction, Dispatchable}; + +#[benchmarks(where + T::RuntimeCall: Dispatchable, + AssetBalanceOf: Send + Sync, + BalanceOf: Send + + Sync + + From + + Into> + + Into> + + From>, + ChargeAssetIdOf: Send + Sync + Default, + ::RuntimeOrigin: AsSystemOriginSigner + Clone, +)] +mod benchmarks { + use super::*; + + #[benchmark] + fn charge_asset_tx_payment_zero() { + let caller: T::AccountId = whitelisted_caller(); + let ext: ChargeAssetTxPayment = ChargeAssetTxPayment::from(0u64.into(), None); + let inner = frame_system::Call::remark { remark: vec![] }; + let call = T::RuntimeCall::from(inner); + let info = DispatchInfo { + weight: Weight::zero(), + class: DispatchClass::Normal, + pays_fee: Pays::No, + }; + let post_info = PostDispatchInfo { actual_weight: None, pays_fee: Pays::No }; + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, |_| Ok(post_info)) + .unwrap() + .is_ok()); + } + } + + #[benchmark] + fn charge_asset_tx_payment_native() { + let caller: T::AccountId = whitelisted_caller(); + let (fun_asset_id, _) = ::BenchmarkHelper::create_asset_id_parameter(1); + ::BenchmarkHelper::setup_balances_and_pool(fun_asset_id, caller.clone()); + let ext: ChargeAssetTxPayment = ChargeAssetTxPayment::from(10u64.into(), None); + let inner = frame_system::Call::remark { remark: vec![] }; + let call = T::RuntimeCall::from(inner); + let info = DispatchInfo { + weight: Weight::from_parts(10, 0), + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(10, 0)), + pays_fee: Pays::Yes, + }; + + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, |_| Ok(post_info)) + .unwrap() + .is_ok()); + } + } + + #[benchmark] + fn charge_asset_tx_payment_asset() { + let caller: T::AccountId = whitelisted_caller(); + let (fun_asset_id, asset_id) = ::BenchmarkHelper::create_asset_id_parameter(1); + ::BenchmarkHelper::setup_balances_and_pool(fun_asset_id, caller.clone()); + + let tip = 10u64.into(); + let ext: ChargeAssetTxPayment = ChargeAssetTxPayment::from(tip, Some(asset_id)); + let inner = frame_system::Call::remark { remark: vec![] }; + let call = T::RuntimeCall::from(inner); + let info = DispatchInfo { + weight: Weight::from_parts(10, 0), + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(10, 0)), + pays_fee: Pays::Yes, + }; + + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller.clone()).into(), &call, &info, 0, |_| Ok( + post_info + )) + .unwrap() + .is_ok()); + } + } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Runtime); +} diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs index ed0ed56e6e074b49db7abf6d43ddb9305d7fb616..8ef7c2ba38e27f8b50c3489504db8ba195b88cf0 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs @@ -20,8 +20,8 @@ //! //! ## Overview //! -//! 
This pallet provides a `SignedExtension` with an optional `AssetId` that specifies the asset -//! to be used for payment (defaulting to the native token on `None`). It expects an +//! This pallet provides a `TransactionExtension` with an optional `AssetId` that specifies the +//! asset to be used for payment (defaulting to the native token on `None`). It expects an //! [`OnChargeAssetTransaction`] implementation analogous to [`pallet-transaction-payment`]. The //! included [`AssetConversionAdapter`] (implementing [`OnChargeAssetTransaction`]) determines the //! fee amount by converting the fee calculated by [`pallet-transaction-payment`] in the native @@ -31,7 +31,7 @@ //! //! This pallet does not have any dispatchable calls or storage. It wraps FRAME's Transaction //! Payment pallet and functions as a replacement. This means you should include both pallets in -//! your `construct_runtime` macro, but only include this pallet's [`SignedExtension`] +//! your `construct_runtime` macro, but only include this pallet's [`TransactionExtension`] //! ([`ChargeAssetTxPayment`]). //! //! ## Terminology @@ -53,23 +53,29 @@ use frame_support::{ }, DefaultNoBound, }; -use pallet_transaction_payment::OnChargeTransaction; +use pallet_transaction_payment::{ChargeTransactionPayment, OnChargeTransaction}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension, Zero}, - transaction_validity::{ - InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, + traits::{ + AsSystemOriginSigner, DispatchInfoOf, Dispatchable, PostDispatchInfoOf, + TransactionExtension, TransactionExtensionBase, ValidateResult, Zero, }, + transaction_validity::{InvalidTransaction, TransactionValidityError, ValidTransaction}, }; #[cfg(test)] mod mock; #[cfg(test)] mod tests; +pub mod weights; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; mod payment; -use frame_support::traits::tokens::AssetId; +use frame_support::{pallet_prelude::Weight, traits::tokens::AssetId}; pub use payment::*; +pub use weights::WeightInfo; /// Type aliases used for interaction with `OnChargeTransaction`. pub(crate) type OnChargeTransactionOf = @@ -125,11 +131,30 @@ pub mod pallet { type Fungibles: Balanced; /// The actual transaction charging logic that charges the fees. type OnChargeAssetTransaction: OnChargeAssetTransaction; + /// The weight information of this pallet. + type WeightInfo: WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + /// Benchmark helper + type BenchmarkHelper: BenchmarkHelperTrait< + Self::AccountId, + <::Fungibles as Inspect>::AssetId, + <::OnChargeAssetTransaction as OnChargeAssetTransaction>::AssetId, + >; } #[pallet::pallet] pub struct Pallet(_); + #[cfg(feature = "runtime-benchmarks")] + /// Helper trait to benchmark the `ChargeAssetTxPayment` transaction extension. + pub trait BenchmarkHelperTrait { + /// Returns the `AssetId` to be used in the liquidity pool by the benchmarking code. + fn create_asset_id_parameter(id: u32) -> (FunAssetIdParameter, AssetIdParameter); + /// Create a liquidity pool for a given asset and sufficiently endow accounts to benchmark + /// the extension. 
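To make the integration note above concrete: the runtime still lists both `TransactionPayment` and this pallet in `construct_runtime`, but only `ChargeAssetTxPayment` goes into the transaction extension tuple. A minimal sketch, where `Runtime` and the neighbouring extensions are hypothetical placeholders:

```rust
/// Hypothetical wiring; only `ChargeAssetTxPayment` is added to the tuple,
/// in place of `pallet_transaction_payment::ChargeTransactionPayment`.
pub type TxExtension = (
	frame_system::CheckNonce<Runtime>,
	frame_system::CheckWeight<Runtime>,
	pallet_asset_conversion_tx_payment::ChargeAssetTxPayment<Runtime>,
);
```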
+ fn setup_balances_and_pool(asset_id: FunAssetIdParameter, account: AccountId); + } + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { @@ -179,9 +204,8 @@ where who: &T::AccountId, call: &T::RuntimeCall, info: &DispatchInfoOf, - len: usize, + fee: BalanceOf, ) -> Result<(BalanceOf, InitialPayment), TransactionValidityError> { - let fee = pallet_transaction_payment::Pallet::::compute_fee(len as u32, info, self.tip); debug_assert!(self.tip <= fee, "tip should be included in the computed fee"); if fee.is_zero() { Ok((fee, InitialPayment::Nothing)) @@ -225,9 +249,8 @@ impl sp_std::fmt::Debug for ChargeAssetTxPayment { } } -impl SignedExtension for ChargeAssetTxPayment +impl TransactionExtensionBase for ChargeAssetTxPayment where - T::RuntimeCall: Dispatchable, AssetBalanceOf: Send + Sync, BalanceOf: Send + Sync @@ -238,109 +261,145 @@ where ChargeAssetIdOf: Send + Sync, { const IDENTIFIER: &'static str = "ChargeAssetTxPayment"; - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); + type Implicit = (); + + fn weight(&self) -> Weight { + if self.asset_id.is_some() { + ::WeightInfo::charge_asset_tx_payment_asset() + } else { + ::WeightInfo::charge_asset_tx_payment_native() + } + } +} + +impl TransactionExtension for ChargeAssetTxPayment +where + T::RuntimeCall: Dispatchable, + AssetBalanceOf: Send + Sync, + BalanceOf: Send + + Sync + + From + + Into> + + Into> + + From>, + ChargeAssetIdOf: Send + Sync, + ::RuntimeOrigin: AsSystemOriginSigner + Clone, +{ + type Val = ( + // tip + BalanceOf, + // who paid the fee + T::AccountId, + // transaction fee + BalanceOf, + ); type Pre = ( // tip BalanceOf, // who paid the fee - Self::AccountId, + T::AccountId, // imbalance resulting from withdrawing the fee InitialPayment, // asset_id for the transaction payment Option>, ); - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { - Ok(()) - } - fn validate( &self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + _call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, - ) -> TransactionValidity { - use pallet_transaction_payment::ChargeTransactionPayment; - let (fee, _) = self.withdraw_fee(who, call, info, len)?; + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> ValidateResult { + let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; + // Non-mutating call of `compute_fee` to calculate the fee used in the transaction priority. + let fee = pallet_transaction_payment::Pallet::::compute_fee(len as u32, info, self.tip); let priority = ChargeTransactionPayment::::get_priority(info, len, self.tip, fee); - Ok(ValidTransaction { priority, ..Default::default() }) + let validity = ValidTransaction { priority, ..Default::default() }; + let val = (self.tip, who.clone(), fee); + Ok((validity, val, origin)) } - fn pre_dispatch( + fn prepare( self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, + val: Self::Val, + _origin: &::RuntimeOrigin, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + _len: usize, + _context: &Context, ) -> Result { - let (_fee, initial_payment) = self.withdraw_fee(who, call, info, len)?; - Ok((self.tip, who.clone(), initial_payment, self.asset_id)) + let (tip, who, fee) = val; + // Mutating call of `withdraw_fee` to actually charge for the transaction. 
+ let (_fee, initial_payment) = self.withdraw_fee(&who, call, info, fee)?; + Ok((tip, who, initial_payment, self.asset_id.clone())) } fn post_dispatch( - pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, len: usize, result: &DispatchResult, + _context: &Context, ) -> Result<(), TransactionValidityError> { - if let Some((tip, who, initial_payment, asset_id)) = pre { - match initial_payment { - InitialPayment::Native(already_withdrawn) => { - debug_assert!( - asset_id.is_none(), - "For that payment type the `asset_id` should be None" - ); - pallet_transaction_payment::ChargeTransactionPayment::::post_dispatch( - Some((tip, who, already_withdrawn)), + let (tip, who, initial_payment, asset_id) = pre; + match initial_payment { + InitialPayment::Native(already_withdrawn) => { + debug_assert!( + asset_id.is_none(), + "For that payment type the `asset_id` should be None" + ); + pallet_transaction_payment::ChargeTransactionPayment::::post_dispatch( + (tip, who, already_withdrawn), + info, + post_info, + len, + result, + &(), + )?; + }, + InitialPayment::Asset(already_withdrawn) => { + debug_assert!( + asset_id.is_some(), + "For that payment type the `asset_id` should be set" + ); + let actual_fee = pallet_transaction_payment::Pallet::::compute_actual_fee( + len as u32, info, post_info, tip, + ); + + if let Some(asset_id) = asset_id { + let (used_for_fee, received_exchanged, asset_consumed) = already_withdrawn; + let converted_fee = T::OnChargeAssetTransaction::correct_and_deposit_fee( + &who, info, post_info, - len, - result, + actual_fee.into(), + tip.into(), + used_for_fee.into(), + received_exchanged.into(), + asset_id.clone(), + asset_consumed.into(), )?; - }, - InitialPayment::Asset(already_withdrawn) => { - debug_assert!( - asset_id.is_some(), - "For that payment type the `asset_id` should be set" - ); - let actual_fee = pallet_transaction_payment::Pallet::::compute_actual_fee( - len as u32, info, post_info, tip, - ); - - if let Some(asset_id) = asset_id { - let (used_for_fee, received_exchanged, asset_consumed) = already_withdrawn; - let converted_fee = T::OnChargeAssetTransaction::correct_and_deposit_fee( - &who, - info, - post_info, - actual_fee.into(), - tip.into(), - used_for_fee.into(), - received_exchanged.into(), - asset_id.clone(), - asset_consumed.into(), - )?; - Pallet::::deposit_event(Event::::AssetTxFeePaid { - who, - actual_fee: converted_fee, - tip, - asset_id, - }); - } - }, - InitialPayment::Nothing => { - // `actual_fee` should be zero here for any signed extrinsic. It would be - // non-zero here in case of unsigned extrinsics as they don't pay fees but - // `compute_actual_fee` is not aware of them. In both cases it's fine to just - // move ahead without adjusting the fee, though, so we do nothing. - debug_assert!(tip.is_zero(), "tip should be zero if initial fee was zero."); - }, - } + Pallet::::deposit_event(Event::::AssetTxFeePaid { + who, + actual_fee: converted_fee, + tip, + asset_id, + }); + } + }, + InitialPayment::Nothing => { + // `actual_fee` should be zero here for any signed extrinsic. It would be + // non-zero here in case of unsigned extrinsics as they don't pay fees but + // `compute_actual_fee` is not aware of them. In both cases it's fine to just + // move ahead without adjusting the fee, though, so we do nothing. 
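The `InitialPayment::Asset` branch above boils down to a withdraw-then-refund scheme: an estimate is taken in `prepare`, the actual fee is recomputed from the real post-dispatch weight, and the surplus is converted back and returned to the payer. A deliberately simplified model in plain native units (assumption: no asset conversion, purely illustrative):

```rust
/// Returns (fee kept, amount refunded) given what was withdrawn up front
/// and the fee recomputed after dispatch. Illustrative only.
fn settle(withdrawn: u64, actual_fee: u64) -> (u64, u64) {
	let refund = withdrawn.saturating_sub(actual_fee);
	(withdrawn - refund, refund)
}
```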
+ debug_assert!(tip.is_zero(), "tip should be zero if initial fee was zero."); + }, } Ok(()) diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs index c8bf2eb8f440fd1203796fd89d9265403b3fdad0..853753d41cfb16b4b84d8ff4317a89ca32fbf309 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs @@ -168,12 +168,12 @@ impl OnUnbalanced> for DealWithFees } } +#[derive_impl(pallet_transaction_payment::config_preludes::TestDefaultConfig as pallet_transaction_payment::DefaultConfig)] impl pallet_transaction_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; type OnChargeTransaction = CurrencyAdapter; type WeightToFee = WeightToFee; type LengthToFee = TransactionByteFee; - type FeeMultiplierUpdate = (); type OperationalFeeMultiplier = ConstU8<5>; } @@ -269,4 +269,72 @@ impl Config for Runtime { type RuntimeEvent = RuntimeEvent; type Fungibles = Assets; type OnChargeAssetTransaction = AssetConversionAdapter; + type WeightInfo = (); + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = Helper; +} + +#[cfg(feature = "runtime-benchmarks")] +pub fn new_test_ext() -> sp_io::TestExternalities { + let base_weight = 5; + let balance_factor = 100; + crate::tests::ExtBuilder::default() + .balance_factor(balance_factor) + .base_weight(Weight::from_parts(base_weight, 0)) + .build() +} + +#[cfg(feature = "runtime-benchmarks")] +pub struct Helper; + +#[cfg(feature = "runtime-benchmarks")] +impl BenchmarkHelperTrait for Helper { + fn create_asset_id_parameter(id: u32) -> (u32, u32) { + (id, id) + } + + fn setup_balances_and_pool(asset_id: u32, account: u64) { + use frame_support::{assert_ok, traits::fungibles::Mutate}; + use sp_runtime::traits::StaticLookup; + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + asset_id.into(), + 42, /* owner */ + true, /* is_sufficient */ + 1, + )); + + let lp_provider = 12; + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), lp_provider, u64::MAX / 2)); + let lp_provider_account = ::Lookup::unlookup(lp_provider); + assert_ok!(Assets::mint_into(asset_id.into(), &lp_provider_account, u64::MAX / 2)); + + let token_1 = Box::new(NativeOrWithId::Native); + let token_2 = Box::new(NativeOrWithId::WithId(asset_id)); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(lp_provider), + token_1.clone(), + token_2.clone() + )); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(lp_provider), + token_1, + token_2, + (u32::MAX / 8).into(), // 1 desired + u32::MAX.into(), // 2 desired + 1, // 1 min + 1, // 2 min + lp_provider_account, + )); + + use frame_support::traits::Currency; + let _ = Balances::deposit_creating(&account, u32::MAX.into()); + + let beneficiary = ::Lookup::unlookup(account); + let balance = 1000; + + assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); + assert_eq!(Assets::balance(asset_id, account), balance); + } } diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs index 62faed269d377cc1dc2b75091d79210401cd0a26..619077c0a5ef42e47e4f445ce1cd1623172a2580 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs @@ -28,7 +28,10 @@ 
use frame_support::{ use frame_system as system; use mock::{ExtrinsicBaseWeight, *}; use pallet_balances::Call as BalancesCall; -use sp_runtime::{traits::StaticLookup, BuildStorage}; +use sp_runtime::{ + traits::{DispatchTransaction, StaticLookup}, + BuildStorage, +}; const CALL: &::RuntimeCall = &RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 2, value: 69 }); @@ -160,33 +163,35 @@ fn transaction_payment_in_native_possible() { .build() .execute_with(|| { let len = 10; - let pre = ChargeAssetTxPayment::::from(0, None) - .pre_dispatch(&1, CALL, &info_from_weight(WEIGHT_5), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, None) + .validate_and_prepare(Some(1).into(), CALL, &info_from_weight(WEIGHT_5), len) .unwrap(); let initial_balance = 10 * balance_factor; assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(WEIGHT_5), &default_post_info(), len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10); - let pre = ChargeAssetTxPayment::::from(5 /* tipped */, None) - .pre_dispatch(&2, CALL, &info_from_weight(WEIGHT_100), len) + let (pre, _) = ChargeAssetTxPayment::::from(5 /* tipped */, None) + .validate_and_prepare(Some(2).into(), CALL, &info_from_weight(WEIGHT_100), len) .unwrap(); let initial_balance_for_2 = 20 * balance_factor; assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 100 - 5); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(WEIGHT_100), &post_info_from_weight(WEIGHT_50), len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 50 - 5); }); @@ -237,8 +242,8 @@ fn transaction_payment_in_asset_possible() { let fee_in_asset = input_quote.unwrap(); assert_eq!(Assets::balance(asset_id, caller), balance); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(WEIGHT_5), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_weight(WEIGHT_5), len) .unwrap(); // assert that native balance is not used assert_eq!(Balances::free_balance(caller), 10 * balance_factor); @@ -247,11 +252,12 @@ fn transaction_payment_in_asset_possible() { assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(WEIGHT_5), // estimated tx weight &default_post_info(), // weight actually used == estimated len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); @@ -289,12 +295,8 @@ fn transaction_payment_in_asset_fails_if_no_pool_for_that_asset() { assert_eq!(Assets::balance(asset_id, caller), balance); let len = 10; - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)).pre_dispatch( - &caller, - CALL, - &info_from_weight(WEIGHT_5), - len, - ); + let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_weight(WEIGHT_5), len); // As there is no pool in the dex set up for this asset, conversion should fail. 
assert!(pre.is_err()); @@ -344,8 +346,8 @@ fn transaction_payment_without_fee() { assert_eq!(input_quote, Some(201)); let fee_in_asset = input_quote.unwrap(); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(WEIGHT_5), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_weight(WEIGHT_5), len) .unwrap(); // assert that native balance is not used @@ -363,11 +365,12 @@ fn transaction_payment_without_fee() { assert_eq!(refund, 199); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(WEIGHT_5), &post_info_from_pays(Pays::No), len, - &Ok(()) + &Ok(()), + &() )); // caller should get refunded @@ -419,8 +422,8 @@ fn asset_transaction_payment_with_tip_and_refund() { assert_eq!(input_quote, Some(1206)); let fee_in_asset = input_quote.unwrap(); - let pre = ChargeAssetTxPayment::::from(tip, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(WEIGHT_100), len) + let (pre, _) = ChargeAssetTxPayment::::from(tip, Some(asset_id)) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_weight(WEIGHT_100), len) .unwrap(); assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); @@ -435,11 +438,12 @@ fn asset_transaction_payment_with_tip_and_refund() { .unwrap(); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(WEIGHT_100), &post_info_from_weight(WEIGHT_50), len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(TipUnbalancedAmount::get(), tip); @@ -500,8 +504,8 @@ fn payment_from_account_with_only_assets() { .unwrap(); assert_eq!(fee_in_asset, 301); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(WEIGHT_5), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_weight(WEIGHT_5), len) .unwrap(); assert_eq!(Balances::free_balance(caller), ed); // check that fee was charged in the given asset @@ -516,11 +520,12 @@ fn payment_from_account_with_only_assets() { .unwrap(); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(WEIGHT_5), &default_post_info(), len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset + refund); assert_eq!(Balances::free_balance(caller), 0); @@ -565,18 +570,19 @@ fn converted_fee_is_never_zero_if_input_fee_is_not() { // there will be no conversion when the fee is zero { - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_pays(Pays::No), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_pays(Pays::No), len) .unwrap(); // `Pays::No` implies there are no fees assert_eq!(Assets::balance(asset_id, caller), balance); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_pays(Pays::No), &post_info_from_pays(Pays::No), len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Assets::balance(asset_id, caller), balance); } @@ -591,17 +597,23 @@ fn converted_fee_is_never_zero_if_input_fee_is_not() { ) .unwrap(); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare( + Some(caller).into(), + CALL, + 
&info_from_weight(Weight::from_parts(weight, 0)), + len, + ) .unwrap(); assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(Weight::from_parts(weight, 0)), &default_post_info(), len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); }); @@ -641,8 +653,8 @@ fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { // calculated fee is greater than 0 assert!(fee > 0); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_pays(Pays::No), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_pays(Pays::No), len) .unwrap(); // `Pays::No` implies no pre-dispatch fees @@ -658,62 +670,12 @@ fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { // `Pays::Yes` on post-dispatch does not mean we pay (we never charge more than the // initial fee) assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_pays(Pays::No), &post_info_from_pays(Pays::Yes), len, - &Ok(()) - )); - assert_eq!(Assets::balance(asset_id, caller), balance); - }); -} - -#[test] -fn post_dispatch_fee_is_zero_if_unsigned_pre_dispatch_fee_is_zero() { - let base_weight = 1; - ExtBuilder::default() - .balance_factor(100) - .base_weight(Weight::from_parts(base_weight, 0)) - .build() - .execute_with(|| { - // create the asset - let asset_id = 1; - let min_balance = 100; - assert_ok!(Assets::force_create( - RuntimeOrigin::root(), - asset_id.into(), - 42, /* owner */ - true, /* is_sufficient */ - min_balance - )); - - // mint into the caller account - let caller = 333; - let beneficiary = ::Lookup::unlookup(caller); - let balance = 1000; - - assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); - assert_eq!(Assets::balance(asset_id, caller), balance); - - let weight = 1; - let len = 1; - ChargeAssetTxPayment::::pre_dispatch_unsigned( - CALL, - &info_from_weight(Weight::from_parts(weight, 0)), - len, - ) - .unwrap(); - - assert_eq!(Assets::balance(asset_id, caller), balance); - - // `Pays::Yes` on post-dispatch does not mean we pay (we never charge more than the - // initial fee) - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - None, - &info_from_weight(Weight::from_parts(weight, 0)), - &post_info_from_pays(Pays::Yes), - len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Assets::balance(asset_id, caller), balance); }); diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/weights.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..f95e49f80730318d4957538ded3812c661009be0 --- /dev/null +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/weights.rs @@ -0,0 +1,150 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_asset_conversion_tx_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// ./target/production/substrate-node +// benchmark +// pallet +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_asset_conversion_tx_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./substrate/frame/transaction-payment/asset-conversion-tx-payment/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_asset_conversion_tx_payment`. +pub trait WeightInfo { + fn charge_asset_tx_payment_zero() -> Weight; + fn charge_asset_tx_payment_native() -> Weight; + fn charge_asset_tx_payment_asset() -> Weight; +} + +/// Weights for `pallet_asset_conversion_tx_payment` using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn charge_asset_tx_payment_zero() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 628_000 picoseconds. + Weight::from_parts(694_000, 0) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_native() -> Weight { + // Proof Size summary in bytes: + // Measured: `248` + // Estimated: `1733` + // Minimum execution time: 34_410_000 picoseconds. 
+ Weight::from_parts(35_263_000, 1733) + .saturating_add(T::DbWeight::get().reads(3_u64)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `888` + // Estimated: `6208` + // Minimum execution time: 112_432_000 picoseconds. + Weight::from_parts(113_992_000, 6208) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + fn charge_asset_tx_payment_zero() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 628_000 picoseconds. + Weight::from_parts(694_000, 0) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_native() -> Weight { + // Proof Size summary in bytes: + // Measured: `248` + // Estimated: `1733` + // Minimum execution time: 34_410_000 picoseconds. + Weight::from_parts(35_263_000, 1733) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `888` + // Estimated: `6208` + // Minimum execution time: 112_432_000 picoseconds. 
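The `_native` and `_asset` figures here are what the extension's `weight()` hook (added in `lib.rs` above) reports back to the runtime, depending on whether an `asset_id` was supplied. A small sketch of that selection, generic over any `WeightInfo` implementation and assuming this file's existing imports (the free function itself is hypothetical):

```rust
/// Hypothetical helper mirroring the extension's `weight()` selection; `W` is
/// any implementation of the `WeightInfo` trait defined in this file.
fn extension_weight<W: WeightInfo>(has_asset_id: bool) -> Weight {
	if has_asset_id {
		W::charge_asset_tx_payment_asset()
	} else {
		W::charge_asset_tx_payment_native()
	}
}
```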
+ Weight::from_parts(113_992_000, 6208) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } +} diff --git a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml index 1da3237df0817c989899b9180af984da9766aa81..06d9c30792e6f8a93d43ce48a06a9c0d34f5cc55 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml @@ -66,6 +66,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] try-runtime = [ diff --git a/substrate/frame/transaction-payment/asset-tx-payment/README.md b/substrate/frame/transaction-payment/asset-tx-payment/README.md index fc860347d85fa37fc98890b5d4b2f56722040a8e..933ce13b0ee6f7a23110772a82b51ab15fc7dd2a 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/README.md +++ b/substrate/frame/transaction-payment/asset-tx-payment/README.md @@ -16,6 +16,6 @@ asset. ### Integration This pallet wraps FRAME's transaction payment pallet and functions as a replacement. This means you should include both pallets in your `construct_runtime` macro, but only include this -pallet's [`SignedExtension`] ([`ChargeAssetTxPayment`]). +pallet's [`TransactionExtension`] ([`ChargeAssetTxPayment`]). License: Apache-2.0 diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/benchmarking.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..17e96e2b02562d226a50800ffbb2109bd66c276e --- /dev/null +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/benchmarking.rs @@ -0,0 +1,123 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Benchmarks for Asset Tx Payment Pallet's transaction extension + +use super::*; +use crate::Pallet; +use frame_benchmarking::v2::*; +use frame_support::{ + dispatch::{DispatchInfo, PostDispatchInfo}, + pallet_prelude::*, +}; +use frame_system::RawOrigin; +use sp_runtime::traits::{AsSystemOriginSigner, DispatchTransaction, Dispatchable}; + +#[benchmarks(where + T::RuntimeCall: Dispatchable, + AssetBalanceOf: Send + Sync, + BalanceOf: Send + Sync + From + IsType>, + ChargeAssetIdOf: Send + Sync, + ::RuntimeOrigin: AsSystemOriginSigner + Clone, + Credit: IsType>, +)] +mod benchmarks { + use super::*; + + #[benchmark] + fn charge_asset_tx_payment_zero() { + let caller: T::AccountId = whitelisted_caller(); + let ext: ChargeAssetTxPayment = ChargeAssetTxPayment::from(0u32.into(), None); + let inner = frame_system::Call::remark { remark: vec![] }; + let call = T::RuntimeCall::from(inner); + let info = DispatchInfo { + weight: Weight::zero(), + class: DispatchClass::Normal, + pays_fee: Pays::No, + }; + let post_info = PostDispatchInfo { actual_weight: None, pays_fee: Pays::No }; + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, |_| Ok(post_info)) + .unwrap() + .is_ok()); + } + } + + #[benchmark] + fn charge_asset_tx_payment_native() { + let caller: T::AccountId = whitelisted_caller(); + let (fun_asset_id, _) = ::BenchmarkHelper::create_asset_id_parameter(1); + ::BenchmarkHelper::setup_balances_and_pool(fun_asset_id, caller.clone()); + let ext: ChargeAssetTxPayment = ChargeAssetTxPayment::from(10u32.into(), None); + let inner = frame_system::Call::remark { remark: vec![] }; + let call = T::RuntimeCall::from(inner); + let info = DispatchInfo { + weight: Weight::from_parts(10, 0), + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(10, 0)), + pays_fee: Pays::Yes, + }; + + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, |_| Ok(post_info)) + .unwrap() + .is_ok()); + } + } + + #[benchmark] + fn charge_asset_tx_payment_asset() { + let caller: T::AccountId = whitelisted_caller(); + let (fun_asset_id, asset_id) = ::BenchmarkHelper::create_asset_id_parameter(1); + ::BenchmarkHelper::setup_balances_and_pool( + fun_asset_id.clone(), + caller.clone(), + ); + let tip = 10u32.into(); + let ext: ChargeAssetTxPayment = ChargeAssetTxPayment::from(tip, Some(asset_id)); + let inner = frame_system::Call::remark { remark: vec![] }; + let call = T::RuntimeCall::from(inner); + let info = DispatchInfo { + weight: Weight::from_parts(10, 0), + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(10, 0)), + pays_fee: Pays::Yes, + }; + + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller.clone()).into(), &call, &info, 0, |_| Ok( + post_info + )) + .unwrap() + .is_ok()); + } + } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Runtime); +} diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs index 753fae747a37ec914abb439fc3829c4caca9a448..991b5f314061639665ecd825ea1ace2ff38745ce 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs @@ -31,7 +31,7 @@ //! This pallet wraps FRAME's transaction payment pallet and functions as a replacement. 
This means //! you should include both pallets in your `construct_runtime` macro, but only include this -//! pallet's [`SignedExtension`] ([`ChargeAssetTxPayment`]). +//! pallet's [`TransactionExtension`] ([`ChargeAssetTxPayment`]). #![cfg_attr(not(feature = "std"), no_std)] @@ -40,6 +40,7 @@ use sp_std::prelude::*; use codec::{Decode, Encode}; use frame_support::{ dispatch::{DispatchInfo, DispatchResult, PostDispatchInfo}, + pallet_prelude::Weight, traits::{ tokens::{ fungibles::{Balanced, Credit, Inspect}, @@ -52,10 +53,11 @@ use frame_support::{ use pallet_transaction_payment::OnChargeTransaction; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension, Zero}, - transaction_validity::{ - InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, + traits::{ + AsSystemOriginSigner, DispatchInfoOf, Dispatchable, PostDispatchInfoOf, + TransactionExtension, TransactionExtensionBase, Zero, }, + transaction_validity::{InvalidTransaction, TransactionValidityError, ValidTransaction}, }; #[cfg(test)] @@ -63,8 +65,14 @@ mod mock; #[cfg(test)] mod tests; +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + mod payment; +pub mod weights; + pub use payment::*; +pub use weights::WeightInfo; /// Type aliases used for interaction with `OnChargeTransaction`. pub(crate) type OnChargeTransactionOf = @@ -120,11 +128,30 @@ pub mod pallet { type Fungibles: Balanced; /// The actual transaction charging logic that charges the fees. type OnChargeAssetTransaction: OnChargeAssetTransaction; + /// The weight information of this pallet. + type WeightInfo: WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + /// Benchmark helper + type BenchmarkHelper: BenchmarkHelperTrait< + Self::AccountId, + <::Fungibles as Inspect>::AssetId, + <::OnChargeAssetTransaction as OnChargeAssetTransaction>::AssetId, + >; } #[pallet::pallet] pub struct Pallet(_); + #[cfg(feature = "runtime-benchmarks")] + /// Helper trait to benchmark the `ChargeAssetTxPayment` transaction extension. + pub trait BenchmarkHelperTrait { + /// Returns the `AssetId` to be used in the liquidity pool by the benchmarking code. + fn create_asset_id_parameter(id: u32) -> (FunAssetIdParameter, AssetIdParameter); + /// Create a liquidity pool for a given asset and sufficiently endow accounts to benchmark + /// the extension. 
+ fn setup_balances_and_pool(asset_id: FunAssetIdParameter, account: AccountId); + } + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { @@ -172,9 +199,8 @@ where who: &T::AccountId, call: &T::RuntimeCall, info: &DispatchInfoOf, - len: usize, + fee: BalanceOf, ) -> Result<(BalanceOf, InitialPayment), TransactionValidityError> { - let fee = pallet_transaction_payment::Pallet::::compute_fee(len as u32, info, self.tip); debug_assert!(self.tip <= fee, "tip should be included in the computed fee"); if fee.is_zero() { Ok((fee, InitialPayment::Nothing)) @@ -209,104 +235,139 @@ impl sp_std::fmt::Debug for ChargeAssetTxPayment { } } -impl SignedExtension for ChargeAssetTxPayment +impl TransactionExtensionBase for ChargeAssetTxPayment where - T::RuntimeCall: Dispatchable, AssetBalanceOf: Send + Sync, BalanceOf: Send + Sync + From + IsType>, ChargeAssetIdOf: Send + Sync, Credit: IsType>, { const IDENTIFIER: &'static str = "ChargeAssetTxPayment"; - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); + type Implicit = (); + + fn weight(&self) -> Weight { + if self.asset_id.is_some() { + ::WeightInfo::charge_asset_tx_payment_asset() + } else { + ::WeightInfo::charge_asset_tx_payment_native() + } + } +} + +impl TransactionExtension for ChargeAssetTxPayment +where + T::RuntimeCall: Dispatchable, + AssetBalanceOf: Send + Sync, + BalanceOf: Send + Sync + From + IsType>, + ChargeAssetIdOf: Send + Sync, + Credit: IsType>, + ::RuntimeOrigin: AsSystemOriginSigner + Clone, +{ + type Val = ( + // tip + BalanceOf, + // who paid the fee + T::AccountId, + // transaction fee + BalanceOf, + ); type Pre = ( // tip BalanceOf, // who paid the fee - Self::AccountId, + T::AccountId, // imbalance resulting from withdrawing the fee InitialPayment, // asset_id for the transaction payment Option>, ); - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { - Ok(()) - } - fn validate( &self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + _call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, - ) -> TransactionValidity { + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> Result< + (ValidTransaction, Self::Val, ::RuntimeOrigin), + TransactionValidityError, + > { use pallet_transaction_payment::ChargeTransactionPayment; - let (fee, _) = self.withdraw_fee(who, call, info, len)?; + let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; + // Non-mutating call of `compute_fee` to calculate the fee used in the transaction priority. 
+ let fee = pallet_transaction_payment::Pallet::::compute_fee(len as u32, info, self.tip); let priority = ChargeTransactionPayment::::get_priority(info, len, self.tip, fee); - Ok(ValidTransaction { priority, ..Default::default() }) + let val = (self.tip, who.clone(), fee); + let validity = ValidTransaction { priority, ..Default::default() }; + Ok((validity, val, origin)) } - fn pre_dispatch( + fn prepare( self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, + val: Self::Val, + _origin: &::RuntimeOrigin, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + _len: usize, + _context: &Context, ) -> Result { - let (_fee, initial_payment) = self.withdraw_fee(who, call, info, len)?; - Ok((self.tip, who.clone(), initial_payment, self.asset_id)) + let (tip, who, fee) = val; + // Mutating call of `withdraw_fee` to actually charge for the transaction. + let (_fee, initial_payment) = self.withdraw_fee(&who, call, info, fee)?; + Ok((tip, who, initial_payment, self.asset_id)) } fn post_dispatch( - pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, len: usize, result: &DispatchResult, + _context: &Context, ) -> Result<(), TransactionValidityError> { - if let Some((tip, who, initial_payment, asset_id)) = pre { - match initial_payment { - InitialPayment::Native(already_withdrawn) => { - pallet_transaction_payment::ChargeTransactionPayment::::post_dispatch( - Some((tip, who, already_withdrawn)), + let (tip, who, initial_payment, asset_id) = pre; + match initial_payment { + InitialPayment::Native(already_withdrawn) => { + pallet_transaction_payment::ChargeTransactionPayment::::post_dispatch( + (tip, who, already_withdrawn), + info, + post_info, + len, + result, + &(), + )?; + }, + InitialPayment::Asset(already_withdrawn) => { + let actual_fee = pallet_transaction_payment::Pallet::::compute_actual_fee( + len as u32, info, post_info, tip, + ); + + let (converted_fee, converted_tip) = + T::OnChargeAssetTransaction::correct_and_deposit_fee( + &who, info, post_info, - len, - result, + actual_fee.into(), + tip.into(), + already_withdrawn.into(), )?; - }, - InitialPayment::Asset(already_withdrawn) => { - let actual_fee = pallet_transaction_payment::Pallet::::compute_actual_fee( - len as u32, info, post_info, tip, - ); - - let (converted_fee, converted_tip) = - T::OnChargeAssetTransaction::correct_and_deposit_fee( - &who, - info, - post_info, - actual_fee.into(), - tip.into(), - already_withdrawn.into(), - )?; - Pallet::::deposit_event(Event::::AssetTxFeePaid { - who, - actual_fee: converted_fee, - tip: converted_tip, - asset_id, - }); - }, - InitialPayment::Nothing => { - // `actual_fee` should be zero here for any signed extrinsic. It would be - // non-zero here in case of unsigned extrinsics as they don't pay fees but - // `compute_actual_fee` is not aware of them. In both cases it's fine to just - // move ahead without adjusting the fee, though, so we do nothing. - debug_assert!(tip.is_zero(), "tip should be zero if initial fee was zero."); - }, - } + Pallet::::deposit_event(Event::::AssetTxFeePaid { + who, + actual_fee: converted_fee, + tip: converted_tip, + asset_id, + }); + }, + InitialPayment::Nothing => { + // `actual_fee` should be zero here for any signed extrinsic. It would be + // non-zero here in case of unsigned extrinsics as they don't pay fees but + // `compute_actual_fee` is not aware of them. 
In both cases it's fine to just + // move ahead without adjusting the fee, though, so we do nothing. + debug_assert!(tip.is_zero(), "tip should be zero if initial fee was zero."); + }, } Ok(()) diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs index 1f335b4f6c4ab5cc59a4815a2160328aa7f584df..bb484795e82598f52f518edb15dcf26443d9f118 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs @@ -136,12 +136,12 @@ impl WeightToFeeT for TransactionByteFee { } } +#[derive_impl(pallet_transaction_payment::config_preludes::TestDefaultConfig as pallet_transaction_payment::DefaultConfig)] impl pallet_transaction_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; type OnChargeTransaction = CurrencyAdapter; type WeightToFee = WeightToFee; type LengthToFee = TransactionByteFee; - type FeeMultiplierUpdate = (); type OperationalFeeMultiplier = ConstU8<5>; } @@ -205,4 +205,56 @@ impl Config for Runtime { pallet_assets::BalanceToAssetBalance, CreditToBlockAuthor, >; + type WeightInfo = (); + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = Helper; +} + +#[cfg(feature = "runtime-benchmarks")] +pub fn new_test_ext() -> sp_io::TestExternalities { + let base_weight = 5; + let balance_factor = 100; + crate::tests::ExtBuilder::default() + .balance_factor(balance_factor) + .base_weight(Weight::from_parts(base_weight, 0)) + .build() +} + +#[cfg(feature = "runtime-benchmarks")] +pub struct Helper; + +#[cfg(feature = "runtime-benchmarks")] +impl BenchmarkHelperTrait for Helper { + fn create_asset_id_parameter(id: u32) -> (u32, u32) { + (id.into(), id.into()) + } + + fn setup_balances_and_pool(asset_id: u32, account: u64) { + use frame_support::{assert_ok, traits::fungibles::Mutate}; + use sp_runtime::traits::StaticLookup; + let min_balance = 1; + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + asset_id.into(), + 42, /* owner */ + true, /* is_sufficient */ + min_balance + )); + + // mint into the caller account + let caller = 2; + let beneficiary = ::Lookup::unlookup(caller); + let balance = 1000; + assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); + assert_eq!(Assets::balance(asset_id, caller), balance); + + use frame_support::traits::Currency; + let _ = Balances::deposit_creating(&account, u32::MAX.into()); + + let beneficiary = ::Lookup::unlookup(account); + let balance = 1000; + + assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); + assert_eq!(Assets::balance(asset_id, account), balance); + } } diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs index 8df98ceda9971565788576ee839792f6d58da2b0..42b8f2cf5fabb8b74aa0a99ac2b67fefa656728a 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs @@ -25,7 +25,10 @@ use frame_support::{ use frame_system as system; use mock::{ExtrinsicBaseWeight, *}; use pallet_balances::Call as BalancesCall; -use sp_runtime::{traits::StaticLookup, BuildStorage}; +use sp_runtime::{ + traits::{DispatchTransaction, StaticLookup}, + BuildStorage, +}; const CALL: &::RuntimeCall = &RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 2, value: 69 }); @@ -116,33 +119,45 @@ fn transaction_payment_in_native_possible() { .build() 
.execute_with(|| { let len = 10; - let pre = ChargeAssetTxPayment::::from(0, None) - .pre_dispatch(&1, CALL, &info_from_weight(Weight::from_parts(5, 0)), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, None) + .validate_and_prepare( + Some(1).into(), + CALL, + &info_from_weight(Weight::from_parts(5, 0)), + len, + ) .unwrap(); let initial_balance = 10 * balance_factor; assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(Weight::from_parts(5, 0)), &default_post_info(), len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10); - let pre = ChargeAssetTxPayment::::from(5 /* tipped */, None) - .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_parts(100, 0)), len) + let (pre, _) = ChargeAssetTxPayment::::from(5 /* tipped */, None) + .validate_and_prepare( + Some(2).into(), + CALL, + &info_from_weight(Weight::from_parts(100, 0)), + len, + ) .unwrap(); let initial_balance_for_2 = 20 * balance_factor; assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 100 - 5); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(Weight::from_parts(100, 0)), &post_info_from_weight(Weight::from_parts(50, 0)), len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 50 - 5); }); @@ -179,8 +194,13 @@ fn transaction_payment_in_asset_possible() { // we convert the from weight to fee based on the ratio between asset min balance and // existential deposit let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len, + ) .unwrap(); // assert that native balance is not used assert_eq!(Balances::free_balance(caller), 10 * balance_factor); @@ -189,11 +209,12 @@ fn transaction_payment_in_asset_possible() { assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), 0); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(Weight::from_parts(weight, 0)), &default_post_info(), len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Assets::balance(asset_id, caller), balance - fee); // check that the block author gets rewarded @@ -232,8 +253,13 @@ fn transaction_payment_without_fee() { // we convert the from weight to fee based on the ratio between asset min balance and // existential deposit let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len, + ) .unwrap(); // assert that native balance is not used assert_eq!(Balances::free_balance(caller), 10 * balance_factor); @@ -242,11 +268,12 @@ fn transaction_payment_without_fee() { assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), 0); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(Weight::from_parts(weight, 0)), &post_info_from_pays(Pays::No), len, - &Ok(()) + 
&Ok(()), + &() )); // caller should be refunded assert_eq!(Assets::balance(asset_id, caller), balance); @@ -287,18 +314,24 @@ fn asset_transaction_payment_with_tip_and_refund() { // existential deposit let fee_with_tip = (base_weight + weight + len as u64 + tip) * min_balance / ExistentialDeposit::get(); - let pre = ChargeAssetTxPayment::::from(tip, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + let (pre, _) = ChargeAssetTxPayment::::from(tip, Some(asset_id)) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len, + ) .unwrap(); assert_eq!(Assets::balance(asset_id, caller), balance - fee_with_tip); let final_weight = 50; assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(Weight::from_parts(weight, 0)), &post_info_from_weight(Weight::from_parts(final_weight, 0)), len, - &Ok(()) + &Ok(()), + &() )); let final_fee = fee_with_tip - (weight - final_weight) * min_balance / ExistentialDeposit::get(); @@ -339,19 +372,25 @@ fn payment_from_account_with_only_assets() { // we convert the from weight to fee based on the ratio between asset min balance and // existential deposit let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len, + ) .unwrap(); assert_eq!(Balances::free_balance(caller), 0); // check that fee was charged in the given asset assert_eq!(Assets::balance(asset_id, caller), balance - fee); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(Weight::from_parts(weight, 0)), &default_post_info(), len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Assets::balance(asset_id, caller), balance - fee); assert_eq!(Balances::free_balance(caller), 0); @@ -372,7 +411,12 @@ fn payment_only_with_existing_sufficient_asset() { let len = 10; // pre_dispatch fails for non-existent asset assert!(ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len + ) .is_err()); // create the non-sufficient asset @@ -386,7 +430,12 @@ fn payment_only_with_existing_sufficient_asset() { )); // pre_dispatch fails for non-sufficient asset assert!(ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len + ) .is_err()); }); } @@ -424,33 +473,40 @@ fn converted_fee_is_never_zero_if_input_fee_is_not() { // naive fee calculation would round down to zero assert_eq!(fee, 0); { - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_pays(Pays::No), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_pays(Pays::No), len) .unwrap(); // `Pays::No` still implies no fees assert_eq!(Assets::balance(asset_id, caller), balance); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, 
&info_from_pays(Pays::No), &post_info_from_pays(Pays::No), len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Assets::balance(asset_id, caller), balance); } - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len, + ) .unwrap(); // check that at least one coin was charged in the given asset assert_eq!(Assets::balance(asset_id, caller), balance - 1); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(Weight::from_parts(weight, 0)), &default_post_info(), len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Assets::balance(asset_id, caller), balance - 1); }); @@ -488,8 +544,8 @@ fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); // calculated fee is greater than 0 assert!(fee > 0); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_pays(Pays::No), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_pays(Pays::No), len) .unwrap(); // `Pays::No` implies no pre-dispatch fees assert_eq!(Assets::balance(asset_id, caller), balance); @@ -503,60 +559,12 @@ fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { // `Pays::Yes` on post-dispatch does not mean we pay (we never charge more than the // initial fee) assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_pays(Pays::No), &post_info_from_pays(Pays::Yes), len, - &Ok(()) - )); - assert_eq!(Assets::balance(asset_id, caller), balance); - }); -} - -#[test] -fn post_dispatch_fee_is_zero_if_unsigned_pre_dispatch_fee_is_zero() { - let base_weight = 1; - ExtBuilder::default() - .balance_factor(100) - .base_weight(Weight::from_parts(base_weight, 0)) - .build() - .execute_with(|| { - // create the asset - let asset_id = 1; - let min_balance = 100; - assert_ok!(Assets::force_create( - RuntimeOrigin::root(), - asset_id.into(), - 42, /* owner */ - true, /* is_sufficient */ - min_balance - )); - - // mint into the caller account - let caller = 333; - let beneficiary = ::Lookup::unlookup(caller); - let balance = 100; - assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); - assert_eq!(Assets::balance(asset_id, caller), balance); - let weight = 1; - let len = 1; - ChargeAssetTxPayment::::pre_dispatch_unsigned( - CALL, - &info_from_weight(Weight::from_parts(weight, 0)), - len, - ) - .unwrap(); - - assert_eq!(Assets::balance(asset_id, caller), balance); - - // `Pays::Yes` on post-dispatch does not mean we pay (we never charge more than the - // initial fee) - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - None, - &info_from_weight(Weight::from_parts(weight, 0)), - &post_info_from_pays(Pays::Yes), - len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Assets::balance(asset_id, caller), balance); }); diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/weights.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..1af1c94177d26b3316d506d1419a3d6cb052d4c2 --- /dev/null +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/weights.rs @@ -0,0 +1,146 @@ +// This file is part of Substrate. 
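The asset-tx-payment test changes above all follow one mechanical pattern: `pre_dispatch(&who, ...)` becomes `validate_and_prepare(Some(who).into(), ...)` from the `DispatchTransaction` helper trait, which returns a pair whose first element is the `Pre` value (the tests above ignore the second element), and `post_dispatch` now consumes that `Pre` by value and takes a trailing context reference. A condensed sketch of the new call shape, reusing the crate's own mock items (`ExtBuilder`, `Runtime`, `CALL`, `info_from_weight`, `default_post_info`) that appear in the tests above:

```rust
// Sketch only; relies on the asset-tx-payment mock used by the tests in this diff.
use frame_support::weights::Weight;
use sp_runtime::traits::DispatchTransaction;

#[test]
fn migrated_call_shape() {
    ExtBuilder::default()
        .balance_factor(10)
        .base_weight(Weight::from_parts(5, 0))
        .build()
        .execute_with(|| {
            let len = 10;
            let info = info_from_weight(Weight::from_parts(5, 0));

            // `validate_and_prepare` chains `validate` + `prepare` and hands back the
            // `Pre` value directly, instead of `pre_dispatch`'s bare `Pre`.
            let (pre, _) = ChargeAssetTxPayment::<Runtime>::from(0, None)
                .validate_and_prepare(Some(1).into(), CALL, &info, len)
                .unwrap();

            // `post_dispatch` now takes `Pre` by value plus an (empty) context reference.
            ChargeAssetTxPayment::<Runtime>::post_dispatch(
                pre,
                &info,
                &default_post_info(),
                len,
                &Ok(()),
                &(),
            )
            .unwrap();
        });
}
```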
+ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_asset_tx_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// ./target/production/substrate-node +// benchmark +// pallet +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_asset_tx_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./substrate/frame/transaction-payment/asset-tx-payment/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_asset_tx_payment`. +pub trait WeightInfo { + fn charge_asset_tx_payment_zero() -> Weight; + fn charge_asset_tx_payment_native() -> Weight; + fn charge_asset_tx_payment_asset() -> Weight; +} + +/// Weights for `pallet_asset_tx_payment` using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn charge_asset_tx_payment_zero() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 542_000 picoseconds. + Weight::from_parts(597_000, 0) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_native() -> Weight { + // Proof Size summary in bytes: + // Measured: `248` + // Estimated: `1733` + // Minimum execution time: 33_162_000 picoseconds. 
+ Weight::from_parts(34_716_000, 1733) + .saturating_add(T::DbWeight::get().reads(3_u64)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `747` + // Estimated: `3675` + // Minimum execution time: 44_230_000 picoseconds. + Weight::from_parts(45_297_000, 3675) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + fn charge_asset_tx_payment_zero() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 542_000 picoseconds. + Weight::from_parts(597_000, 0) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_native() -> Weight { + // Proof Size summary in bytes: + // Measured: `248` + // Estimated: `1733` + // Minimum execution time: 33_162_000 picoseconds. + Weight::from_parts(34_716_000, 1733) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `747` + // Estimated: `3675` + // Minimum execution time: 44_230_000 picoseconds. 
+ Weight::from_parts(45_297_000, 3675) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } +} diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs index 6c34c26ce9236dd76ff90224d3ce2d59dbd3157a..9810ea08c790d2ad964d5bf98906d12b76fe8f91 100644 --- a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs @@ -21,7 +21,7 @@ //! //! ## Overview //! -//! It does this by wrapping an existing [`SignedExtension`] implementation (e.g. +//! It does this by wrapping an existing [`TransactionExtension`] implementation (e.g. //! [`pallet-transaction-payment`]) and checking if the dispatchable is feeless before applying the //! wrapped extension. If the dispatchable is indeed feeless, the extension is skipped and a custom //! event is emitted instead. Otherwise, the extension is applied as usual. @@ -31,7 +31,8 @@ //! //! This pallet wraps an existing transaction payment pallet. This means you should both pallets //! in your `construct_runtime` macro and include this pallet's -//! [`SignedExtension`] ([`SkipCheckIfFeeless`]) that would accept the existing one as an argument. +//! [`TransactionExtension`] ([`SkipCheckIfFeeless`]) that would accept the existing one as an +//! argument. #![cfg_attr(not(feature = "std"), no_std)] @@ -42,7 +43,10 @@ use frame_support::{ }; use scale_info::{StaticTypeInfo, TypeInfo}; use sp_runtime::{ - traits::{DispatchInfoOf, PostDispatchInfoOf, SignedExtension}, + traits::{ + DispatchInfoOf, OriginOf, PostDispatchInfoOf, TransactionExtension, + TransactionExtensionBase, ValidateResult, + }, transaction_validity::TransactionValidityError, }; @@ -70,11 +74,11 @@ pub mod pallet { #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// A transaction fee was skipped. - FeeSkipped { who: T::AccountId }, + FeeSkipped { origin: ::PalletsOrigin }, } } -/// A [`SignedExtension`] that skips the wrapped extension if the dispatchable is feeless. +/// A [`TransactionExtension`] that skips the wrapped extension if the dispatchable is feeless. #[derive(Encode, Decode, Clone, Eq, PartialEq)] pub struct SkipCheckIfFeeless(pub S, sp_std::marker::PhantomData); @@ -103,53 +107,96 @@ impl From for SkipCheckIfFeeless { } } -impl> SignedExtension +pub enum Intermediate { + /// The wrapped extension should be applied. + Apply(T), + /// The wrapped extension should be skipped. + Skip(O), +} +use Intermediate::*; + +impl TransactionExtensionBase for SkipCheckIfFeeless -where - S::Call: CheckIfFeeless>, { - type AccountId = T::AccountId; - type Call = S::Call; - type AdditionalSigned = S::AdditionalSigned; - type Pre = (Self::AccountId, Option<::Pre>); // From the outside this extension should be "invisible", because it just extends the wrapped // extension with an extra check in `pre_dispatch` and `post_dispatch`. Thus, we should forward // the identifier of the wrapped extension to let wallets see this extension as it would only be // the wrapped extension itself. 
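As the overview and the comment above spell out, `SkipCheckIfFeeless` takes the wrapped extension's place in a runtime's extension list, forwarding `IDENTIFIER` and `Implicit` so that, from the outside, only the inner extension is visible; at dispatch time it either applies the inner extension (`Apply`) or skips it and emits `FeeSkipped` (`Skip`) when the call is feeless. A minimal sketch of wrapping `pallet-transaction-payment`'s extension, assuming a runtime that configures both pallets (`Runtime`, `Balance` and `charge_or_skip` are illustrative names, not part of this diff):

```rust
// Illustrative sketch only: assumes a runtime configuring both pallet_transaction_payment
// and pallet_skip_feeless_payment; `Runtime` and `Balance` are placeholders.
use pallet_skip_feeless_payment::SkipCheckIfFeeless;
use pallet_transaction_payment::ChargeTransactionPayment;

/// The wrapper occupies the inner extension's slot in the runtime's extension tuple.
pub type ChargeOrSkip = SkipCheckIfFeeless<Runtime, ChargeTransactionPayment<Runtime>>;

/// Building the value wraps the inner extension, as the mock tests do with `from`.
pub fn charge_or_skip(tip: Balance) -> ChargeOrSkip {
    SkipCheckIfFeeless::from(ChargeTransactionPayment::from(tip))
}
```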
const IDENTIFIER: &'static str = S::IDENTIFIER; + type Implicit = S::Implicit; + + fn implicit(&self) -> Result { + self.0.implicit() + } - fn additional_signed(&self) -> Result { - self.0.additional_signed() + fn weight(&self) -> frame_support::weights::Weight { + self.0.weight() } +} - fn pre_dispatch( +impl> + TransactionExtension for SkipCheckIfFeeless +where + T::RuntimeCall: CheckIfFeeless>, +{ + type Val = Intermediate as OriginTrait>::PalletsOrigin>; + type Pre = Intermediate as OriginTrait>::PalletsOrigin>; + + fn validate( + &self, + origin: OriginOf, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + len: usize, + context: &mut Context, + self_implicit: S::Implicit, + inherited_implication: &impl Encode, + ) -> ValidateResult { + if call.is_feeless(&origin) { + Ok((Default::default(), Skip(origin.caller().clone()), origin)) + } else { + let (x, y, z) = self.0.validate( + origin, + call, + info, + len, + context, + self_implicit, + inherited_implication, + )?; + Ok((x, Apply(y), z)) + } + } + + fn prepare( self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, + val: Self::Val, + origin: &OriginOf, + call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, + context: &Context, ) -> Result { - if call.is_feeless(&::RuntimeOrigin::signed(who.clone())) { - Ok((who.clone(), None)) - } else { - Ok((who.clone(), Some(self.0.pre_dispatch(who, call, info, len)?))) + match val { + Apply(val) => self.0.prepare(val, origin, call, info, len, context).map(Apply), + Skip(origin) => Ok(Skip(origin)), } } fn post_dispatch( - pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, len: usize, result: &DispatchResult, + context: &Context, ) -> Result<(), TransactionValidityError> { - if let Some(pre) = pre { - if let Some(pre) = pre.1 { - S::post_dispatch(Some(pre), info, post_info, len, result)?; - } else { - Pallet::::deposit_event(Event::::FeeSkipped { who: pre.0 }); - } + match pre { + Apply(pre) => S::post_dispatch(pre, info, post_info, len, result, context), + Skip(origin) => { + Pallet::::deposit_event(Event::::FeeSkipped { origin }); + Ok(()) + }, } - Ok(()) } } diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs index 17c4c773997eb591c1f8aac06a607c3e35df8db0..06948de4c3ce972ba0e38eddac6ae3ab23804bbd 100644 --- a/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs @@ -18,9 +18,12 @@ use crate as pallet_skip_feeless_payment; use frame_support::{derive_impl, parameter_types}; use frame_system as system; +use sp_runtime::{ + impl_tx_ext_default, + traits::{OriginOf, TransactionExtension}, +}; type Block = frame_system::mocking::MockBlock; -type AccountId = u64; #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { @@ -38,21 +41,22 @@ parameter_types! 
{ #[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, TypeInfo)] pub struct DummyExtension; -impl SignedExtension for DummyExtension { - type AccountId = AccountId; - type Call = RuntimeCall; - type AdditionalSigned = (); - type Pre = (); +impl TransactionExtensionBase for DummyExtension { const IDENTIFIER: &'static str = "DummyExtension"; - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { - Ok(()) - } - fn pre_dispatch( + type Implicit = (); +} +impl TransactionExtension for DummyExtension { + type Val = (); + type Pre = (); + impl_tx_ext_default!(RuntimeCall; C; validate); + fn prepare( self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, + _val: Self::Val, + _origin: &OriginOf, + _call: &RuntimeCall, + _info: &DispatchInfoOf, _len: usize, + _context: &C, ) -> Result { PreDispatchCount::mutate(|c| *c += 1); Ok(()) diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs index 4b4dd6997418f9c9e39ebe0ab294051a688f2f87..96b4af7e203e0bcbd1773969f2baefdfe24150d6 100644 --- a/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs @@ -16,18 +16,19 @@ use super::*; use crate::mock::{pallet_dummy::Call, DummyExtension, PreDispatchCount, Runtime, RuntimeCall}; use frame_support::dispatch::DispatchInfo; +use sp_runtime::traits::DispatchTransaction; #[test] fn skip_feeless_payment_works() { let call = RuntimeCall::DummyPallet(Call::::aux { data: 1 }); SkipCheckIfFeeless::::from(DummyExtension) - .pre_dispatch(&0, &call, &DispatchInfo::default(), 0) + .validate_and_prepare(Some(0).into(), &call, &DispatchInfo::default(), 0) .unwrap(); assert_eq!(PreDispatchCount::get(), 1); let call = RuntimeCall::DummyPallet(Call::::aux { data: 0 }); SkipCheckIfFeeless::::from(DummyExtension) - .pre_dispatch(&0, &call, &DispatchInfo::default(), 0) + .validate_and_prepare(Some(0).into(), &call, &DispatchInfo::default(), 0) .unwrap(); assert_eq!(PreDispatchCount::get(), 1); } diff --git a/substrate/frame/transaction-payment/src/benchmarking.rs b/substrate/frame/transaction-payment/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..d63c5a7d746e4cb5030438a2c9c82b1a307f965e --- /dev/null +++ b/substrate/frame/transaction-payment/src/benchmarking.rs @@ -0,0 +1,81 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Benchmarks for Transaction Payment Pallet's transaction extension + +use super::*; +use crate::Pallet; +use frame_benchmarking::v2::*; +use frame_support::dispatch::{DispatchInfo, PostDispatchInfo}; +use frame_system::{EventRecord, RawOrigin}; +use sp_runtime::traits::{DispatchTransaction, Dispatchable}; + +fn assert_last_event(generic_event: ::RuntimeEvent) { + let events = frame_system::Pallet::::events(); + let system_event: ::RuntimeEvent = generic_event.into(); + // compare to the last event record + let EventRecord { event, .. } = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + +#[benchmarks(where + T: Config, + T::RuntimeCall: Dispatchable, + BalanceOf: Send + Sync + From, +)] +mod benchmarks { + use super::*; + + #[benchmark] + fn charge_transaction_payment() { + let caller: T::AccountId = whitelisted_caller(); + >::endow_account( + &caller, + >::minimum_balance() * 1000.into(), + ); + let tip = >::minimum_balance(); + let ext: ChargeTransactionPayment = ChargeTransactionPayment::from(tip); + let inner = frame_system::Call::remark { remark: vec![] }; + let call = T::RuntimeCall::from(inner); + let info = DispatchInfo { + weight: Weight::from_parts(100, 0), + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(10, 0)), + pays_fee: Pays::Yes, + }; + + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller.clone()).into(), &call, &info, 10, |_| Ok( + post_info + )) + .unwrap() + .is_ok()); + } + + let actual_fee = Pallet::::compute_actual_fee(10, &info, &post_info, tip); + assert_last_event::( + Event::::TransactionFeePaid { who: caller, actual_fee, tip }.into(), + ); + } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Runtime); +} diff --git a/substrate/frame/transaction-payment/src/lib.rs b/substrate/frame/transaction-payment/src/lib.rs index efadfd60bdd30126f8627fccea7c02f4c42c68a3..6b17616bdc7eb1ba22dddb135926b0ab83b5592e 100644 --- a/substrate/frame/transaction-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/src/lib.rs @@ -62,23 +62,28 @@ pub use payment::*; use sp_runtime::{ traits::{ Convert, DispatchInfoOf, Dispatchable, One, PostDispatchInfoOf, SaturatedConversion, - Saturating, SignedExtension, Zero, + Saturating, TransactionExtension, TransactionExtensionBase, Zero, }, transaction_validity::{ - TransactionPriority, TransactionValidity, TransactionValidityError, ValidTransaction, + InvalidTransaction, TransactionPriority, TransactionValidityError, ValidTransaction, }, FixedPointNumber, FixedU128, Perbill, Perquintill, RuntimeDebug, }; use sp_std::prelude::*; pub use types::{FeeDetails, InclusionFee, RuntimeDispatchInfo}; +pub use weights::WeightInfo; #[cfg(test)] mod mock; #[cfg(test)] mod tests; +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + mod payment; mod types; +pub mod weights; /// Fee multiplier. pub type Multiplier = FixedU128; @@ -335,6 +340,7 @@ pub mod pallet { type RuntimeEvent = (); type FeeMultiplierUpdate = (); type OperationalFeeMultiplier = (); + type WeightInfo = (); } } @@ -387,6 +393,9 @@ pub mod pallet { /// transactions. #[pallet::constant] type OperationalFeeMultiplier: Get; + + /// The weight information of this pallet. + type WeightInfo: WeightInfo; } #[pallet::type_value] @@ -507,11 +516,11 @@ impl Pallet { // a very very little potential gain in the future. 
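The new benchmark above exercises the whole extension pipeline through the `DispatchTransaction::test_run` helper: it validates and prepares the extension, runs the supplied closure in place of the real dispatch, then calls `post_dispatch`, which is why the `TransactionFeePaid` event can be asserted afterwards. The rewritten tests later in this diff lean on the same helper. A minimal sketch of the call shape, reusing the transaction-payment mock items (`ExtBuilder`, `Runtime`, `CALL`, `info_from_weight`, `post_info_from_weight`) seen in this diff; the final assertion is illustrative:

```rust
// Sketch only; relies on the transaction-payment mock used by the tests in this diff.
use frame_support::weights::Weight;
use sp_runtime::traits::DispatchTransaction;

#[test]
fn test_run_call_shape() {
    ExtBuilder::default()
        .balance_factor(10)
        .base_weight(Weight::from_parts(5, 0))
        .build()
        .execute_with(|| {
            let info = info_from_weight(Weight::from_parts(100, 0));
            let post = ChargeTransactionPayment::<Runtime>::from(5 /* tip */)
                // validate -> prepare -> closure (stand-in for the dispatch) -> post_dispatch
                .test_run(Some(2).into(), CALL, &info, 10 /* len */, |_origin| {
                    Ok(post_info_from_weight(Weight::from_parts(50, 0)))
                })
                .unwrap() // outer Result: validity / fee-withdrawal errors
                .unwrap(); // inner Result: the dispatch outcome returned by the closure
            assert_eq!(post.actual_weight, Some(Weight::from_parts(50, 0)));
        });
}
```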
let dispatch_info = ::get_dispatch_info(&unchecked_extrinsic); - let partial_fee = if unchecked_extrinsic.is_signed().unwrap_or(false) { - Self::compute_fee(len, &dispatch_info, 0u32.into()) - } else { - // Unsigned extrinsics have no partial fee. + let partial_fee = if unchecked_extrinsic.is_bare() { + // Bare extrinsics have no partial fee. 0u32.into() + } else { + Self::compute_fee(len, &dispatch_info, 0u32.into()) }; let DispatchInfo { weight, class, .. } = dispatch_info; @@ -531,11 +540,11 @@ impl Pallet { let tip = 0u32.into(); - if unchecked_extrinsic.is_signed().unwrap_or(false) { - Self::compute_fee_details(len, &dispatch_info, tip) - } else { - // Unsigned extrinsics have no inclusion fee. + if unchecked_extrinsic.is_bare() { + // Bare extrinsics have no inclusion fee. FeeDetails { inclusion_fee: None, tip } + } else { + Self::compute_fee_details(len, &dispatch_info, tip) } } @@ -714,7 +723,7 @@ where who: &T::AccountId, call: &T::RuntimeCall, info: &DispatchInfoOf, - len: usize, + fee: BalanceOf, ) -> Result< ( BalanceOf, @@ -723,7 +732,6 @@ where TransactionValidityError, > { let tip = self.0; - let fee = Pallet::::compute_fee(len as u32, info, tip); <::OnChargeTransaction as OnChargeTransaction>::withdraw_fee( who, call, info, fee, tip, @@ -731,6 +739,22 @@ where .map(|i| (fee, i)) } + fn can_withdraw_fee( + &self, + who: &T::AccountId, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + len: usize, + ) -> Result, TransactionValidityError> { + let tip = self.0; + let fee = Pallet::::compute_fee(len as u32, info, tip); + + <::OnChargeTransaction as OnChargeTransaction>::can_withdraw_fee( + who, call, info, fee, tip, + )?; + Ok(fee) + } + /// Get an appropriate priority for a transaction with the given `DispatchInfo`, encoded length /// and user-included tip. /// @@ -817,67 +841,93 @@ impl sp_std::fmt::Debug for ChargeTransactionPayment { } } -impl SignedExtension for ChargeTransactionPayment +impl TransactionExtensionBase for ChargeTransactionPayment { + const IDENTIFIER: &'static str = "ChargeTransactionPayment"; + type Implicit = (); + + fn weight(&self) -> Weight { + T::WeightInfo::charge_transaction_payment() + } +} + +impl TransactionExtension + for ChargeTransactionPayment where BalanceOf: Send + Sync + From, T::RuntimeCall: Dispatchable, { - const IDENTIFIER: &'static str = "ChargeTransactionPayment"; - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); + type Val = ( + // tip + BalanceOf, + // who paid the fee + T::AccountId, + // computed fee + BalanceOf, + ); type Pre = ( // tip BalanceOf, - // who paid the fee - this is an option to allow for a Default impl. 
- Self::AccountId, + // who paid the fee + T::AccountId, // imbalance resulting from withdrawing the fee <::OnChargeTransaction as OnChargeTransaction>::LiquidityInfo, ); - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { - Ok(()) - } fn validate( &self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, - ) -> TransactionValidity { - let (final_fee, _) = self.withdraw_fee(who, call, info, len)?; + _context: &mut Context, + _: (), + _implication: &impl Encode, + ) -> Result< + (ValidTransaction, Self::Val, ::RuntimeOrigin), + TransactionValidityError, + > { + let who = frame_system::ensure_signed(origin.clone()) + .map_err(|_| InvalidTransaction::BadSigner)?; + let final_fee = self.can_withdraw_fee(&who, call, info, len)?; let tip = self.0; - Ok(ValidTransaction { - priority: Self::get_priority(info, len, tip, final_fee), - ..Default::default() - }) + Ok(( + ValidTransaction { + priority: Self::get_priority(info, len, tip, final_fee), + ..Default::default() + }, + (self.0, who, final_fee), + origin, + )) } - fn pre_dispatch( + fn prepare( self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, + val: Self::Val, + _origin: &::RuntimeOrigin, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + _len: usize, + _context: &Context, ) -> Result { - let (_fee, imbalance) = self.withdraw_fee(who, call, info, len)?; - Ok((self.0, who.clone(), imbalance)) + let (tip, who, fee) = val; + // Mutating call to `withdraw_fee` to actually charge for the transaction. + let (_final_fee, imbalance) = self.withdraw_fee(&who, call, info, fee)?; + Ok((tip, who, imbalance)) } fn post_dispatch( - maybe_pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + (tip, who, imbalance): Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, len: usize, _result: &DispatchResult, + _context: &Context, ) -> Result<(), TransactionValidityError> { - if let Some((tip, who, imbalance)) = maybe_pre { - let actual_fee = Pallet::::compute_actual_fee(len as u32, info, post_info, tip); - T::OnChargeTransaction::correct_and_deposit_fee( - &who, info, post_info, actual_fee, tip, imbalance, - )?; - Pallet::::deposit_event(Event::::TransactionFeePaid { who, actual_fee, tip }); - } + let actual_fee = Pallet::::compute_actual_fee(len as u32, info, post_info, tip); + T::OnChargeTransaction::correct_and_deposit_fee( + &who, info, post_info, actual_fee, tip, imbalance, + )?; + Pallet::::deposit_event(Event::::TransactionFeePaid { who, actual_fee, tip }); Ok(()) } } diff --git a/substrate/frame/transaction-payment/src/mock.rs b/substrate/frame/transaction-payment/src/mock.rs index 1ca2e3d734720455f8a2fdc9453c712d1f199796..eda234ffcae577f9794b4b310eb2492ff32843b4 100644 --- a/substrate/frame/transaction-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/src/mock.rs @@ -157,4 +157,14 @@ impl Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = TransactionByteFee; type FeeMultiplierUpdate = (); + type WeightInfo = (); +} + +#[cfg(feature = "runtime-benchmarks")] +pub fn new_test_ext() -> sp_io::TestExternalities { + crate::tests::ExtBuilder::default() + .base_weight(Weight::from_parts(100, 0)) + .byte_fee(10) + .balance_factor(0) + .build() } diff --git a/substrate/frame/transaction-payment/src/payment.rs b/substrate/frame/transaction-payment/src/payment.rs index 
886683f2e0b883aea0ccd4e769c902ee45bdf99a..a13e73b50b0e931579a21995ca466ca6b3de4621 100644 --- a/substrate/frame/transaction-payment/src/payment.rs +++ b/substrate/frame/transaction-payment/src/payment.rs @@ -20,7 +20,7 @@ use crate::Config; use core::marker::PhantomData; use sp_runtime::{ - traits::{DispatchInfoOf, PostDispatchInfoOf, Saturating, Zero}, + traits::{CheckedSub, DispatchInfoOf, PostDispatchInfoOf, Saturating, Zero}, transaction_validity::InvalidTransaction, }; @@ -51,6 +51,17 @@ pub trait OnChargeTransaction { tip: Self::Balance, ) -> Result; + /// Check if the predicted fee from the transaction origin can be withdrawn. + /// + /// Note: The `fee` already includes the `tip`. + fn can_withdraw_fee( + who: &T::AccountId, + call: &T::RuntimeCall, + dispatch_info: &DispatchInfoOf, + fee: Self::Balance, + tip: Self::Balance, + ) -> Result<(), TransactionValidityError>; + /// After the transaction was executed the actual fee can be calculated. /// This function should refund any overpaid fees and optionally deposit /// the corrected amount. @@ -64,6 +75,12 @@ pub trait OnChargeTransaction { tip: Self::Balance, already_withdrawn: Self::LiquidityInfo, ) -> Result<(), TransactionValidityError>; + + #[cfg(feature = "runtime-benchmarks")] + fn endow_account(who: &T::AccountId, amount: Self::Balance); + + #[cfg(feature = "runtime-benchmarks")] + fn minimum_balance() -> Self::Balance; } /// Implements the transaction payment for a pallet implementing the `Currency` @@ -121,6 +138,33 @@ where } } + /// Check if the predicted fee from the transaction origin can be withdrawn. + /// + /// Note: The `fee` already includes the `tip`. + fn can_withdraw_fee( + who: &T::AccountId, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, + fee: Self::Balance, + tip: Self::Balance, + ) -> Result<(), TransactionValidityError> { + if fee.is_zero() { + return Ok(()) + } + + let withdraw_reason = if tip.is_zero() { + WithdrawReasons::TRANSACTION_PAYMENT + } else { + WithdrawReasons::TRANSACTION_PAYMENT | WithdrawReasons::TIP + }; + + let new_balance = + C::free_balance(who).checked_sub(&fee).ok_or(InvalidTransaction::Payment)?; + C::ensure_can_withdraw(who, fee, withdraw_reason, new_balance) + .map(|_| ()) + .map_err(|_| InvalidTransaction::Payment.into()) + } + /// Hand the fee and the tip over to the `[OnUnbalanced]` implementation. /// Since the predicted fee might have been too high, parts of the fee may /// be refunded. 
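Taken together, the changes above split fee handling across the new extension lifecycle: `validate` only checks that the fee could be paid, via the new, non-mutating `OnChargeTransaction::can_withdraw_fee`, and carries `(tip, who, fee)` forward as `Val`; `prepare` performs the actual withdrawal with `withdraw_fee`; `post_dispatch` recomputes the fee from the actual weight and refunds the difference. Any custom `OnChargeTransaction` therefore needs a read-only counterpart to its `withdraw_fee`. A hedged sketch of such a check written against `fungible::Inspect`, purely as an illustration of the shape (the adapter in this diff uses the `Currency` trait instead):

```rust
// Illustrative only: a read-only fee check in the spirit of `can_withdraw_fee`,
// written against `fungible::Inspect` rather than the `Currency`-based adapter above.
use frame_support::traits::{fungible::Inspect, tokens::WithdrawConsequence};
use sp_runtime::{
    traits::Zero,
    transaction_validity::{InvalidTransaction, TransactionValidityError},
};

fn can_withdraw_fee<T, F>(
    who: &T::AccountId,
    fee: F::Balance,
) -> Result<(), TransactionValidityError>
where
    T: frame_system::Config,
    F: Inspect<T::AccountId>,
{
    // A zero fee (e.g. `Pays::No`) never needs a withdrawal.
    if fee.is_zero() {
        return Ok(());
    }
    // Ask the asset implementation whether the withdrawal would succeed, without mutating state.
    match F::can_withdraw(who, fee) {
        // Whether to also accept `ReducedToZero(_)` is a policy choice for the implementor.
        WithdrawConsequence::Success => Ok(()),
        _ => Err(InvalidTransaction::Payment.into()),
    }
}
```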
@@ -153,4 +197,14 @@ where } Ok(()) } + + #[cfg(feature = "runtime-benchmarks")] + fn endow_account(who: &T::AccountId, amount: Self::Balance) { + let _ = C::deposit_creating(who, amount); + } + + #[cfg(feature = "runtime-benchmarks")] + fn minimum_balance() -> Self::Balance { + C::minimum_balance() + } } diff --git a/substrate/frame/transaction-payment/src/tests.rs b/substrate/frame/transaction-payment/src/tests.rs index d3a1721ccb9909b2f2a705d9bb9ddc9ebc7cb5ee..7a49f8111e2315de5784101fa7de86d7010b4c32 100644 --- a/substrate/frame/transaction-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/src/tests.rs @@ -21,11 +21,14 @@ use crate as pallet_transaction_payment; use codec::Encode; use sp_runtime::{ - testing::TestXt, traits::One, transaction_validity::InvalidTransaction, BuildStorage, + generic::UncheckedExtrinsic, + traits::{DispatchTransaction, One}, + transaction_validity::InvalidTransaction, + BuildStorage, }; use frame_support::{ - assert_noop, assert_ok, + assert_ok, dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, PostDispatchInfo}, traits::Currency, weights::Weight, @@ -128,44 +131,37 @@ fn default_post_info() -> PostDispatchInfo { PostDispatchInfo { actual_weight: None, pays_fee: Default::default() } } +type Ext = ChargeTransactionPayment; + #[test] -fn signed_extension_transaction_payment_work() { +fn transaction_extension_transaction_payment_work() { ExtBuilder::default() .balance_factor(10) .base_weight(Weight::from_parts(5, 0)) .build() .execute_with(|| { - let len = 10; - let pre = ChargeTransactionPayment::::from(0) - .pre_dispatch(&1, CALL, &info_from_weight(Weight::from_parts(5, 0)), len) + let info = info_from_weight(Weight::from_parts(5, 0)); + Ext::from(0) + .test_run(Some(1).into(), CALL, &info, 10, |_| { + assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); + Ok(default_post_info()) + }) + .unwrap() .unwrap(); assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); - - assert_ok!(ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info_from_weight(Weight::from_parts(5, 0)), - &default_post_info(), - len, - &Ok(()) - )); - assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); assert_eq!(FeeUnbalancedAmount::get(), 5 + 5 + 10); assert_eq!(TipUnbalancedAmount::get(), 0); FeeUnbalancedAmount::mutate(|a| *a = 0); - let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_parts(100, 0)), len) + let info = info_from_weight(Weight::from_parts(100, 0)); + Ext::from(5 /* tipped */) + .test_run(Some(2).into(), CALL, &info, 10, |_| { + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); + Ok(post_info_from_weight(Weight::from_parts(50, 0))) + }) + .unwrap() .unwrap(); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); - - assert_ok!(ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info_from_weight(Weight::from_parts(100, 0)), - &post_info_from_weight(Weight::from_parts(50, 0)), - len, - &Ok(()) - )); assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 50 - 5); assert_eq!(FeeUnbalancedAmount::get(), 5 + 10 + 50); assert_eq!(TipUnbalancedAmount::get(), 5); @@ -173,43 +169,37 @@ fn signed_extension_transaction_payment_work() { } #[test] -fn signed_extension_transaction_payment_multiplied_refund_works() { +fn transaction_extension_transaction_payment_multiplied_refund_works() { ExtBuilder::default() .balance_factor(10) .base_weight(Weight::from_parts(5, 0)) .build() .execute_with(|| { - let len = 10; >::put(Multiplier::saturating_from_rational(3, 2)); 
- let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_parts(100, 0)), len) + let len = 10; + let origin = Some(2).into(); + let info = info_from_weight(Weight::from_parts(100, 0)); + Ext::from(5 /* tipped */) + .test_run(origin, CALL, &info, len, |_| { + // 5 base fee, 10 byte fee, 3/2 * 100 weight fee, 5 tip + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 150 - 5); + Ok(post_info_from_weight(Weight::from_parts(50, 0))) + }) + .unwrap() .unwrap(); - // 5 base fee, 10 byte fee, 3/2 * 100 weight fee, 5 tip - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 150 - 5); - - assert_ok!(ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info_from_weight(Weight::from_parts(100, 0)), - &post_info_from_weight(Weight::from_parts(50, 0)), - len, - &Ok(()) - )); + // 75 (3/2 of the returned 50 units of weight) is refunded assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 75 - 5); }); } #[test] -fn signed_extension_transaction_payment_is_bounded() { +fn transaction_extension_transaction_payment_is_bounded() { ExtBuilder::default().balance_factor(1000).byte_fee(0).build().execute_with(|| { // maximum weight possible - assert_ok!(ChargeTransactionPayment::::from(0).pre_dispatch( - &1, - CALL, - &info_from_weight(Weight::MAX), - 10 - )); + let info = info_from_weight(Weight::MAX); + assert_ok!(Ext::from(0).validate_and_prepare(Some(1).into(), CALL, &info, 10)); // fee will be proportional to what is the actual maximum weight in the runtime. assert_eq!( Balances::free_balance(&1), @@ -220,7 +210,7 @@ fn signed_extension_transaction_payment_is_bounded() { } #[test] -fn signed_extension_allows_free_transactions() { +fn transaction_extension_allows_free_transactions() { ExtBuilder::default() .base_weight(Weight::from_parts(100, 0)) .balance_factor(0) @@ -232,38 +222,28 @@ fn signed_extension_allows_free_transactions() { let len = 100; // This is a completely free (and thus wholly insecure/DoS-ridden) transaction. 
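The multiplied-refund test above keeps the same arithmetic as before, just asserted inside the `test_run` closure: only the weight component of the fee is scaled by `NextFeeMultiplier`, and the refund is computed against the actual post-dispatch weight. Spelled out with the mock values used above (base weight 5, one fee unit per encoded byte, declared weight 100, actual weight 50, tip 5, multiplier 3/2):

```rust
// Worked example of the numbers asserted in the multiplied-refund test above.
#[test]
fn multiplied_refund_arithmetic() {
    let base = 5u64; // base extrinsic weight, mapped 1:1 to fee units in the mock
    let len_fee = 10u64; // 10 encoded bytes at 1 unit per byte
    let tip = 5u64;

    // While the call is dispatching, the fee for the *declared* weight (100) has been
    // withdrawn, with only the weight part scaled by the 3/2 multiplier:
    let withdrawn = base + len_fee + (3 * 100) / 2 + tip;
    assert_eq!(withdrawn, 170); // hence `200 - 5 - 10 - 150 - 5` inside the closure

    // `post_dispatch` recomputes against the *actual* weight (50) and refunds the difference:
    let corrected = base + len_fee + (3 * 50) / 2 + tip;
    assert_eq!(corrected, 95); // hence the final balance `200 - 5 - 10 - 75 - 5`
    assert_eq!(withdrawn - corrected, 75); // "75 (3/2 of the returned 50) is refunded"
}
```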
- let operational_transaction = DispatchInfo { + let op_tx = DispatchInfo { weight: Weight::from_parts(0, 0), class: DispatchClass::Operational, pays_fee: Pays::No, }; - assert_ok!(ChargeTransactionPayment::::from(0).validate( - &1, - CALL, - &operational_transaction, - len - )); + assert_ok!(Ext::from(0).validate_only(Some(1).into(), CALL, &op_tx, len)); // like a InsecureFreeNormal - let free_transaction = DispatchInfo { + let free_tx = DispatchInfo { weight: Weight::from_parts(0, 0), class: DispatchClass::Normal, pays_fee: Pays::Yes, }; - assert_noop!( - ChargeTransactionPayment::::from(0).validate( - &1, - CALL, - &free_transaction, - len - ), + assert_eq!( + Ext::from(0).validate_only(Some(1).into(), CALL, &free_tx, len).unwrap_err(), TransactionValidityError::Invalid(InvalidTransaction::Payment), ); }); } #[test] -fn signed_ext_length_fee_is_also_updated_per_congestion() { +fn transaction_ext_length_fee_is_also_updated_per_congestion() { ExtBuilder::default() .base_weight(Weight::from_parts(5, 0)) .balance_factor(10) @@ -272,16 +252,15 @@ fn signed_ext_length_fee_is_also_updated_per_congestion() { // all fees should be x1.5 >::put(Multiplier::saturating_from_rational(3, 2)); let len = 10; - - assert_ok!(ChargeTransactionPayment::::from(10) // tipped - .pre_dispatch(&1, CALL, &info_from_weight(Weight::from_parts(3, 0)), len)); + let info = &info_from_weight(Weight::from_parts(3, 0)); + assert_ok!(Ext::from(10).validate_and_prepare(Some(1).into(), CALL, info, len)); assert_eq!( Balances::free_balance(1), 100 // original - - 10 // tip - - 5 // base - - 10 // len - - (3 * 3 / 2) // adjusted weight + - 10 // tip + - 5 // base + - 10 // len + - (3 * 3 / 2) // adjusted weight ); }) } @@ -291,62 +270,62 @@ fn query_info_and_fee_details_works() { let call = RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 2, value: 69 }); let origin = 111111; let extra = (); - let xt = TestXt::new(call.clone(), Some((origin, extra))); + let xt = UncheckedExtrinsic::new_signed(call.clone(), origin, (), extra); let info = xt.get_dispatch_info(); let ext = xt.encode(); let len = ext.len() as u32; - let unsigned_xt = TestXt::<_, ()>::new(call, None); + let unsigned_xt = UncheckedExtrinsic::::new_bare(call); let unsigned_xt_info = unsigned_xt.get_dispatch_info(); ExtBuilder::default() - .base_weight(Weight::from_parts(5, 0)) - .weight_fee(2) - .build() - .execute_with(|| { - // all fees should be x1.5 - >::put(Multiplier::saturating_from_rational(3, 2)); - - assert_eq!( - TransactionPayment::query_info(xt.clone(), len), - RuntimeDispatchInfo { - weight: info.weight, - class: info.class, - partial_fee: 5 * 2 /* base * weight_fee */ - + len as u64 /* len * 1 */ - + info.weight.min(BlockWeights::get().max_block).ref_time() as u64 * 2 * 3 / 2 /* weight */ - }, - ); - - assert_eq!( - TransactionPayment::query_info(unsigned_xt.clone(), len), - RuntimeDispatchInfo { - weight: unsigned_xt_info.weight, - class: unsigned_xt_info.class, - partial_fee: 0, - }, - ); - - assert_eq!( - TransactionPayment::query_fee_details(xt, len), - FeeDetails { - inclusion_fee: Some(InclusionFee { - base_fee: 5 * 2, - len_fee: len as u64, - adjusted_weight_fee: info - .weight - .min(BlockWeights::get().max_block) - .ref_time() as u64 * 2 * 3 / 2 - }), - tip: 0, - }, - ); - - assert_eq!( - TransactionPayment::query_fee_details(unsigned_xt, len), - FeeDetails { inclusion_fee: None, tip: 0 }, - ); - }); + .base_weight(Weight::from_parts(5, 0)) + .weight_fee(2) + .build() + .execute_with(|| { + // all fees should be x1.5 + 
>::put(Multiplier::saturating_from_rational(3, 2)); + + assert_eq!( + TransactionPayment::query_info(xt.clone(), len), + RuntimeDispatchInfo { + weight: info.weight, + class: info.class, + partial_fee: 5 * 2 /* base * weight_fee */ + + len as u64 /* len * 1 */ + + info.weight.min(BlockWeights::get().max_block).ref_time() as u64 * 2 * 3 / 2 /* weight */ + }, + ); + + assert_eq!( + TransactionPayment::query_info(unsigned_xt.clone(), len), + RuntimeDispatchInfo { + weight: unsigned_xt_info.weight, + class: unsigned_xt_info.class, + partial_fee: 0, + }, + ); + + assert_eq!( + TransactionPayment::query_fee_details(xt, len), + FeeDetails { + inclusion_fee: Some(InclusionFee { + base_fee: 5 * 2, + len_fee: len as u64, + adjusted_weight_fee: info + .weight + .min(BlockWeights::get().max_block) + .ref_time() as u64 * 2 * 3 / 2 + }), + tip: 0, + }, + ); + + assert_eq!( + TransactionPayment::query_fee_details(unsigned_xt, len), + FeeDetails { inclusion_fee: None, tip: 0 }, + ); + }); } #[test] @@ -357,39 +336,39 @@ fn query_call_info_and_fee_details_works() { let len = encoded_call.len() as u32; ExtBuilder::default() - .base_weight(Weight::from_parts(5, 0)) - .weight_fee(2) - .build() - .execute_with(|| { - // all fees should be x1.5 - >::put(Multiplier::saturating_from_rational(3, 2)); - - assert_eq!( - TransactionPayment::query_call_info(call.clone(), len), - RuntimeDispatchInfo { - weight: info.weight, - class: info.class, - partial_fee: 5 * 2 /* base * weight_fee */ - + len as u64 /* len * 1 */ - + info.weight.min(BlockWeights::get().max_block).ref_time() as u64 * 2 * 3 / 2 /* weight */ - }, - ); - - assert_eq!( - TransactionPayment::query_call_fee_details(call, len), - FeeDetails { - inclusion_fee: Some(InclusionFee { - base_fee: 5 * 2, /* base * weight_fee */ - len_fee: len as u64, /* len * 1 */ - adjusted_weight_fee: info - .weight - .min(BlockWeights::get().max_block) - .ref_time() as u64 * 2 * 3 / 2 /* weight * weight_fee * multipler */ - }), - tip: 0, - }, - ); - }); + .base_weight(Weight::from_parts(5, 0)) + .weight_fee(2) + .build() + .execute_with(|| { + // all fees should be x1.5 + >::put(Multiplier::saturating_from_rational(3, 2)); + + assert_eq!( + TransactionPayment::query_call_info(call.clone(), len), + RuntimeDispatchInfo { + weight: info.weight, + class: info.class, + partial_fee: 5 * 2 /* base * weight_fee */ + + len as u64 /* len * 1 */ + + info.weight.min(BlockWeights::get().max_block).ref_time() as u64 * 2 * 3 / 2 /* weight */ + }, + ); + + assert_eq!( + TransactionPayment::query_call_fee_details(call, len), + FeeDetails { + inclusion_fee: Some(InclusionFee { + base_fee: 5 * 2, /* base * weight_fee */ + len_fee: len as u64, /* len * 1 */ + adjusted_weight_fee: info + .weight + .min(BlockWeights::get().max_block) + .ref_time() as u64 * 2 * 3 / 2 /* weight * weight_fee * multipler */ + }), + tip: 0, + }, + ); + }); } #[test] @@ -526,27 +505,23 @@ fn refund_does_not_recreate_account() { .execute_with(|| { // So events are emitted System::set_block_number(10); - let len = 10; - let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_parts(100, 0)), len) + let info = info_from_weight(Weight::from_parts(100, 0)); + Ext::from(5 /* tipped */) + .test_run(Some(2).into(), CALL, &info, 10, |origin| { + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); + + // kill the account between pre and post dispatch + assert_ok!(Balances::transfer_allow_death( + origin, + 3, + Balances::free_balance(2) + )); + 
assert_eq!(Balances::free_balance(2), 0); + + Ok(post_info_from_weight(Weight::from_parts(50, 0))) + }) + .unwrap() .unwrap(); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); - - // kill the account between pre and post dispatch - assert_ok!(Balances::transfer_allow_death( - Some(2).into(), - 3, - Balances::free_balance(2) - )); - assert_eq!(Balances::free_balance(2), 0); - - assert_ok!(ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info_from_weight(Weight::from_parts(100, 0)), - &post_info_from_weight(Weight::from_parts(50, 0)), - len, - &Ok(()) - )); assert_eq!(Balances::free_balance(2), 0); // Transfer Event System::assert_has_event(RuntimeEvent::Balances(pallet_balances::Event::Transfer { @@ -568,20 +543,15 @@ fn actual_weight_higher_than_max_refunds_nothing() { .base_weight(Weight::from_parts(5, 0)) .build() .execute_with(|| { - let len = 10; - let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_parts(100, 0)), len) + let info = info_from_weight(Weight::from_parts(100, 0)); + Ext::from(5 /* tipped */) + .test_run(Some(2).into(), CALL, &info, 10, |_| { + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); + Ok(post_info_from_weight(Weight::from_parts(101, 0))) + }) + .unwrap() .unwrap(); assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); - - assert_ok!(ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info_from_weight(Weight::from_parts(100, 0)), - &post_info_from_weight(Weight::from_parts(101, 0)), - len, - &Ok(()) - )); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); }); } @@ -594,25 +564,20 @@ fn zero_transfer_on_free_transaction() { .execute_with(|| { // So events are emitted System::set_block_number(10); - let len = 10; - let dispatch_info = DispatchInfo { + let info = DispatchInfo { weight: Weight::from_parts(100, 0), pays_fee: Pays::No, class: DispatchClass::Normal, }; let user = 69; - let pre = ChargeTransactionPayment::::from(0) - .pre_dispatch(&user, CALL, &dispatch_info, len) + Ext::from(0) + .test_run(Some(user).into(), CALL, &info, 10, |_| { + assert_eq!(Balances::total_balance(&user), 0); + Ok(default_post_info()) + }) + .unwrap() .unwrap(); assert_eq!(Balances::total_balance(&user), 0); - assert_ok!(ChargeTransactionPayment::::post_dispatch( - Some(pre), - &dispatch_info, - &default_post_info(), - len, - &Ok(()) - )); - assert_eq!(Balances::total_balance(&user), 0); // TransactionFeePaid Event System::assert_has_event(RuntimeEvent::TransactionPayment( pallet_transaction_payment::Event::TransactionFeePaid { @@ -639,19 +604,11 @@ fn refund_consistent_with_actual_weight() { >::put(Multiplier::saturating_from_rational(5, 4)); - let pre = ChargeTransactionPayment::::from(tip) - .pre_dispatch(&2, CALL, &info, len) + Ext::from(tip) + .test_run(Some(2).into(), CALL, &info, len, |_| Ok(post_info)) + .unwrap() .unwrap(); - ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info, - &post_info, - len, - &Ok(()), - ) - .unwrap(); - let refund_based_fee = prev_balance - Balances::free_balance(2); let actual_fee = Pallet::::compute_actual_fee(len as u32, &info, &post_info, tip); @@ -673,18 +630,13 @@ fn should_alter_operational_priority() { class: DispatchClass::Normal, pays_fee: Pays::Yes, }; - let priority = ChargeTransactionPayment::(tip) - .validate(&2, CALL, &normal, len) - .unwrap() - .priority; + let ext = Ext::from(tip); + let priority = ext.validate_only(Some(2).into(), CALL, &normal, len).unwrap().0.priority; 
assert_eq!(priority, 60); - let priority = ChargeTransactionPayment::(2 * tip) - .validate(&2, CALL, &normal, len) - .unwrap() - .priority; - + let ext = Ext::from(2 * tip); + let priority = ext.validate_only(Some(2).into(), CALL, &normal, len).unwrap().0.priority; assert_eq!(priority, 110); }); @@ -694,16 +646,13 @@ fn should_alter_operational_priority() { class: DispatchClass::Operational, pays_fee: Pays::Yes, }; - let priority = ChargeTransactionPayment::(tip) - .validate(&2, CALL, &op, len) - .unwrap() - .priority; + + let ext = Ext::from(tip); + let priority = ext.validate_only(Some(2).into(), CALL, &op, len).unwrap().0.priority; assert_eq!(priority, 5810); - let priority = ChargeTransactionPayment::(2 * tip) - .validate(&2, CALL, &op, len) - .unwrap() - .priority; + let ext = Ext::from(2 * tip); + let priority = ext.validate_only(Some(2).into(), CALL, &op, len).unwrap().0.priority; assert_eq!(priority, 6110); }); } @@ -719,11 +668,8 @@ fn no_tip_has_some_priority() { class: DispatchClass::Normal, pays_fee: Pays::Yes, }; - let priority = ChargeTransactionPayment::(tip) - .validate(&2, CALL, &normal, len) - .unwrap() - .priority; - + let ext = Ext::from(tip); + let priority = ext.validate_only(Some(2).into(), CALL, &normal, len).unwrap().0.priority; assert_eq!(priority, 10); }); @@ -733,10 +679,8 @@ fn no_tip_has_some_priority() { class: DispatchClass::Operational, pays_fee: Pays::Yes, }; - let priority = ChargeTransactionPayment::(tip) - .validate(&2, CALL, &op, len) - .unwrap() - .priority; + let ext = Ext::from(tip); + let priority = ext.validate_only(Some(2).into(), CALL, &op, len).unwrap().0.priority; assert_eq!(priority, 5510); }); } @@ -744,8 +688,8 @@ fn no_tip_has_some_priority() { #[test] fn higher_tip_have_higher_priority() { let get_priorities = |tip: u64| { - let mut priority1 = 0; - let mut priority2 = 0; + let mut pri1 = 0; + let mut pri2 = 0; let len = 10; ExtBuilder::default().balance_factor(100).build().execute_with(|| { let normal = DispatchInfo { @@ -753,10 +697,8 @@ fn higher_tip_have_higher_priority() { class: DispatchClass::Normal, pays_fee: Pays::Yes, }; - priority1 = ChargeTransactionPayment::(tip) - .validate(&2, CALL, &normal, len) - .unwrap() - .priority; + let ext = Ext::from(tip); + pri1 = ext.validate_only(Some(2).into(), CALL, &normal, len).unwrap().0.priority; }); ExtBuilder::default().balance_factor(100).build().execute_with(|| { @@ -765,13 +707,11 @@ fn higher_tip_have_higher_priority() { class: DispatchClass::Operational, pays_fee: Pays::Yes, }; - priority2 = ChargeTransactionPayment::(tip) - .validate(&2, CALL, &op, len) - .unwrap() - .priority; + let ext = Ext::from(tip); + pri2 = ext.validate_only(Some(2).into(), CALL, &op, len).unwrap().0.priority; }); - (priority1, priority2) + (pri1, pri2) }; let mut prev_priorities = get_priorities(0); @@ -799,19 +739,11 @@ fn post_info_can_change_pays_fee() { >::put(Multiplier::saturating_from_rational(5, 4)); - let pre = ChargeTransactionPayment::::from(tip) - .pre_dispatch(&2, CALL, &info, len) + let post_info = ChargeTransactionPayment::::from(tip) + .test_run(Some(2).into(), CALL, &info, len, |_| Ok(post_info)) + .unwrap() .unwrap(); - ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info, - &post_info, - len, - &Ok(()), - ) - .unwrap(); - let refund_based_fee = prev_balance - Balances::free_balance(2); let actual_fee = Pallet::::compute_actual_fee(len as u32, &info, &post_info, tip); diff --git a/substrate/frame/transaction-payment/src/types.rs b/substrate/frame/transaction-payment/src/types.rs 
index 25cecc58a63ab092822d61bb32259a7a846b230a..bfb3012fef06033f9419708251865b8eadb619be 100644
--- a/substrate/frame/transaction-payment/src/types.rs
+++ b/substrate/frame/transaction-payment/src/types.rs
@@ -112,7 +112,7 @@ pub struct RuntimeDispatchInfo
 	/// The inclusion fee of this dispatch.
 	///
 	/// This does not include a tip or anything else that
-	/// depends on the signature (i.e. depends on a `SignedExtension`).
+	/// depends on the signature (i.e. depends on a `TransactionExtension`).
 	#[cfg_attr(feature = "std", serde(with = "serde_balance"))]
 	pub partial_fee: Balance,
 }
diff --git a/substrate/frame/transaction-payment/src/weights.rs b/substrate/frame/transaction-payment/src/weights.rs
new file mode 100644
index 0000000000000000000000000000000000000000..bcffb2eb331acf77f694f74510f61d8653db0f95
--- /dev/null
+++ b/substrate/frame/transaction-payment/src/weights.rs
@@ -0,0 +1,92 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for `pallet_transaction_payment`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`
+
+// Executed Command:
+// ./target/production/substrate-node
+// benchmark
+// pallet
+// --chain=dev
+// --steps=50
+// --repeat=20
+// --pallet=pallet_transaction_payment
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --output=./substrate/frame/transaction-payment/src/weights.rs
+// --header=./substrate/HEADER-APACHE2
+// --template=./substrate/.maintain/frame-weight-template.hbs
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use core::marker::PhantomData;
+
+/// Weight functions needed for `pallet_transaction_payment`.
+pub trait WeightInfo {
+	fn charge_transaction_payment() -> Weight;
+}
+
+/// Weights for `pallet_transaction_payment` using the Substrate node and recommended hardware.
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	/// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0)
+	/// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `Authorship::Author` (r:1 w:0)
+	/// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+	/// Storage: `System::Digest` (r:1 w:0)
+	/// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	fn charge_transaction_payment() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `248`
+		// Estimated: `1733`
+		// Minimum execution time: 40_506_000 picoseconds.
+		Weight::from_parts(41_647_000, 1733)
+			.saturating_add(T::DbWeight::get().reads(3_u64))
+	}
+}
+
+// For backwards compatibility and tests.
+impl WeightInfo for () {
+	/// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0)
+	/// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `Authorship::Author` (r:1 w:0)
+	/// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+	/// Storage: `System::Digest` (r:1 w:0)
+	/// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	fn charge_transaction_payment() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `248`
+		// Estimated: `1733`
+		// Minimum execution time: 40_506_000 picoseconds.
+		Weight::from_parts(41_647_000, 1733)
+			.saturating_add(RocksDbWeight::get().reads(3_u64))
+	}
+}
diff --git a/substrate/frame/transaction-storage/src/weights.rs b/substrate/frame/transaction-storage/src/weights.rs
index 519317177c492f4e1179b9ee12f17da7d442b0e4..bfbed62c5a37202bb4b93ac9e07cedc0891251cc 100644
--- a/substrate/frame/transaction-storage/src/weights.rs
+++ b/substrate/frame/transaction-storage/src/weights.rs
@@ -15,16 +15,16 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//! Autogenerated weights for pallet_transaction_storage
+//! Autogenerated weights for `pallet_transaction_storage`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//!
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/transaction-storage/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/transaction-storage/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,125 +49,133 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_transaction_storage. +/// Weight functions needed for `pallet_transaction_storage`. pub trait WeightInfo { fn store(l: u32, ) -> Weight; fn renew() -> Weight; fn check_proof_max() -> Weight; } -/// Weights for pallet_transaction_storage using the Substrate node and recommended hardware. +/// Weights for `pallet_transaction_storage` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: TransactionStorage ByteFee (r:1 w:0) - /// Proof: TransactionStorage ByteFee (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: TransactionStorage EntryFee (r:1 w:0) - /// Proof: TransactionStorage EntryFee (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: TransactionStorage BlockTransactions (r:1 w:1) - /// Proof: TransactionStorage BlockTransactions (max_values: Some(1), max_size: Some(36866), added: 37361, mode: MaxEncodedLen) + /// Storage: `TransactionStorage::ByteFee` (r:1 w:0) + /// Proof: `TransactionStorage::ByteFee` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::EntryFee` (r:1 w:0) + /// Proof: `TransactionStorage::EntryFee` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::BlockTransactions` (r:1 w:1) + /// Proof: `TransactionStorage::BlockTransactions` (`max_values`: Some(1), `max_size`: Some(36866), added: 37361, mode: `MaxEncodedLen`) /// The range of component `l` is `[1, 8388608]`. fn store(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `176` + // Measured: `242` // Estimated: `38351` - // Minimum execution time: 34_844_000 picoseconds. - Weight::from_parts(35_489_000, 38351) + // Minimum execution time: 57_912_000 picoseconds. 
+ Weight::from_parts(58_642_000, 38351) // Standard Error: 11 - .saturating_add(Weight::from_parts(6_912, 0).saturating_mul(l.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) + .saturating_add(Weight::from_parts(6_778, 0).saturating_mul(l.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: TransactionStorage Transactions (r:1 w:0) - /// Proof: TransactionStorage Transactions (max_values: None, max_size: Some(36886), added: 39361, mode: MaxEncodedLen) - /// Storage: TransactionStorage ByteFee (r:1 w:0) - /// Proof: TransactionStorage ByteFee (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: TransactionStorage EntryFee (r:1 w:0) - /// Proof: TransactionStorage EntryFee (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: TransactionStorage BlockTransactions (r:1 w:1) - /// Proof: TransactionStorage BlockTransactions (max_values: Some(1), max_size: Some(36866), added: 37361, mode: MaxEncodedLen) + /// Storage: `TransactionStorage::Transactions` (r:1 w:0) + /// Proof: `TransactionStorage::Transactions` (`max_values`: None, `max_size`: Some(36886), added: 39361, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::ByteFee` (r:1 w:0) + /// Proof: `TransactionStorage::ByteFee` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::EntryFee` (r:1 w:0) + /// Proof: `TransactionStorage::EntryFee` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::BlockTransactions` (r:1 w:1) + /// Proof: `TransactionStorage::BlockTransactions` (`max_values`: Some(1), `max_size`: Some(36866), added: 37361, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `326` + // Measured: `430` // Estimated: `40351` - // Minimum execution time: 48_244_000 picoseconds. - Weight::from_parts(50_939_000, 40351) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) + // Minimum execution time: 72_571_000 picoseconds. 
+ Weight::from_parts(74_832_000, 40351) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: TransactionStorage ProofChecked (r:1 w:1) - /// Proof: TransactionStorage ProofChecked (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: TransactionStorage StoragePeriod (r:1 w:0) - /// Proof: TransactionStorage StoragePeriod (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: TransactionStorage ChunkCount (r:1 w:0) - /// Proof: TransactionStorage ChunkCount (max_values: None, max_size: Some(24), added: 2499, mode: MaxEncodedLen) - /// Storage: System ParentHash (r:1 w:0) - /// Proof: System ParentHash (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TransactionStorage Transactions (r:1 w:0) - /// Proof: TransactionStorage Transactions (max_values: None, max_size: Some(36886), added: 39361, mode: MaxEncodedLen) + /// Storage: `TransactionStorage::ProofChecked` (r:1 w:1) + /// Proof: `TransactionStorage::ProofChecked` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::StoragePeriod` (r:1 w:0) + /// Proof: `TransactionStorage::StoragePeriod` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::ChunkCount` (r:1 w:0) + /// Proof: `TransactionStorage::ChunkCount` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::Transactions` (r:1 w:0) + /// Proof: `TransactionStorage::Transactions` (`max_values`: None, `max_size`: Some(36886), added: 39361, mode: `MaxEncodedLen`) fn check_proof_max() -> Weight { // Proof Size summary in bytes: - // Measured: `37145` + // Measured: `37211` // Estimated: `40351` - // Minimum execution time: 80_913_000 picoseconds. - Weight::from_parts(84_812_000, 40351) + // Minimum execution time: 65_985_000 picoseconds. + Weight::from_parts(72_960_000, 40351) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: TransactionStorage ByteFee (r:1 w:0) - /// Proof: TransactionStorage ByteFee (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: TransactionStorage EntryFee (r:1 w:0) - /// Proof: TransactionStorage EntryFee (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: TransactionStorage BlockTransactions (r:1 w:1) - /// Proof: TransactionStorage BlockTransactions (max_values: Some(1), max_size: Some(36866), added: 37361, mode: MaxEncodedLen) + /// Storage: `TransactionStorage::ByteFee` (r:1 w:0) + /// Proof: `TransactionStorage::ByteFee` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::EntryFee` (r:1 w:0) + /// Proof: `TransactionStorage::EntryFee` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::BlockTransactions` (r:1 w:1) + /// Proof: `TransactionStorage::BlockTransactions` (`max_values`: Some(1), `max_size`: Some(36866), added: 37361, mode: `MaxEncodedLen`) /// The range of component `l` is `[1, 8388608]`. fn store(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `176` + // Measured: `242` // Estimated: `38351` - // Minimum execution time: 34_844_000 picoseconds. - Weight::from_parts(35_489_000, 38351) + // Minimum execution time: 57_912_000 picoseconds. + Weight::from_parts(58_642_000, 38351) // Standard Error: 11 - .saturating_add(Weight::from_parts(6_912, 0).saturating_mul(l.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) + .saturating_add(Weight::from_parts(6_778, 0).saturating_mul(l.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: TransactionStorage Transactions (r:1 w:0) - /// Proof: TransactionStorage Transactions (max_values: None, max_size: Some(36886), added: 39361, mode: MaxEncodedLen) - /// Storage: TransactionStorage ByteFee (r:1 w:0) - /// Proof: TransactionStorage ByteFee (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: TransactionStorage EntryFee (r:1 w:0) - /// Proof: TransactionStorage EntryFee (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: TransactionStorage BlockTransactions (r:1 w:1) - /// Proof: TransactionStorage BlockTransactions (max_values: Some(1), max_size: Some(36866), added: 37361, mode: MaxEncodedLen) + /// Storage: `TransactionStorage::Transactions` (r:1 w:0) + /// Proof: `TransactionStorage::Transactions` (`max_values`: None, `max_size`: Some(36886), added: 39361, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::ByteFee` (r:1 w:0) + /// Proof: `TransactionStorage::ByteFee` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::EntryFee` (r:1 w:0) + /// Proof: `TransactionStorage::EntryFee` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::BlockTransactions` (r:1 w:1) + /// Proof: `TransactionStorage::BlockTransactions` 
(`max_values`: Some(1), `max_size`: Some(36866), added: 37361, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `326` + // Measured: `430` // Estimated: `40351` - // Minimum execution time: 48_244_000 picoseconds. - Weight::from_parts(50_939_000, 40351) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) + // Minimum execution time: 72_571_000 picoseconds. + Weight::from_parts(74_832_000, 40351) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: TransactionStorage ProofChecked (r:1 w:1) - /// Proof: TransactionStorage ProofChecked (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: TransactionStorage StoragePeriod (r:1 w:0) - /// Proof: TransactionStorage StoragePeriod (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: TransactionStorage ChunkCount (r:1 w:0) - /// Proof: TransactionStorage ChunkCount (max_values: None, max_size: Some(24), added: 2499, mode: MaxEncodedLen) - /// Storage: System ParentHash (r:1 w:0) - /// Proof: System ParentHash (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TransactionStorage Transactions (r:1 w:0) - /// Proof: TransactionStorage Transactions (max_values: None, max_size: Some(36886), added: 39361, mode: MaxEncodedLen) + /// Storage: `TransactionStorage::ProofChecked` (r:1 w:1) + /// Proof: `TransactionStorage::ProofChecked` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::StoragePeriod` (r:1 w:0) + /// Proof: `TransactionStorage::StoragePeriod` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::ChunkCount` (r:1 w:0) + /// Proof: `TransactionStorage::ChunkCount` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::Transactions` (r:1 w:0) + /// Proof: `TransactionStorage::Transactions` (`max_values`: None, `max_size`: Some(36886), added: 39361, mode: `MaxEncodedLen`) fn check_proof_max() -> Weight { // Proof Size summary in bytes: - // Measured: `37145` + // Measured: `37211` // Estimated: `40351` - // Minimum execution time: 80_913_000 picoseconds. - Weight::from_parts(84_812_000, 40351) + // Minimum execution time: 65_985_000 picoseconds. + Weight::from_parts(72_960_000, 40351) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/treasury/src/weights.rs b/substrate/frame/treasury/src/weights.rs index 030e18980eb54f8b16452691b884e6046878aaa2..8ef4f89fd6519037d811666e0792cd13bcc2661a 100644 --- a/substrate/frame/treasury/src/weights.rs +++ b/substrate/frame/treasury/src/weights.rs @@ -15,27 +15,31 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_treasury +//! Autogenerated weights for `pallet_treasury` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-07, STEPS: `20`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `cob`, CPU: `` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/debug/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev -// --steps=20 -// --repeat=2 -// --pallet=pallet-treasury +// --steps=50 +// --repeat=20 +// --pallet=pallet_treasury +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/treasury/src/._weights.rs -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/treasury/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -45,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_treasury. +/// Weight functions needed for `pallet_treasury`. pub trait WeightInfo { fn spend_local() -> Weight; fn propose_spend() -> Weight; @@ -59,304 +63,304 @@ pub trait WeightInfo { fn void_spend() -> Weight; } -/// Weights for pallet_treasury using the Substrate node and recommended hardware. +/// Weights for `pallet_treasury` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Treasury ProposalCount (r:1 w:1) - /// Proof: Treasury ProposalCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:0 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) + /// Storage: `Treasury::ProposalCount` (r:1 w:1) + /// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:0 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) fn spend_local() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1887` - // Minimum execution time: 179_000_000 picoseconds. - Weight::from_parts(190_000_000, 1887) + // Minimum execution time: 11_686_000 picoseconds. 
+ Weight::from_parts(12_138_000, 1887) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Treasury ProposalCount (r:1 w:1) - /// Proof: Treasury ProposalCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:0 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) + /// Storage: `Treasury::ProposalCount` (r:1 w:1) + /// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:0 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) fn propose_spend() -> Weight { // Proof Size summary in bytes: // Measured: `177` // Estimated: `1489` - // Minimum execution time: 349_000_000 picoseconds. - Weight::from_parts(398_000_000, 1489) + // Minimum execution time: 23_773_000 picoseconds. + Weight::from_parts(24_801_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Treasury Proposals (r:1 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Treasury::Proposals` (r:1 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn reject_proposal() -> Weight { // Proof Size summary in bytes: // Measured: `335` // Estimated: `3593` - // Minimum execution time: 367_000_000 picoseconds. - Weight::from_parts(388_000_000, 3593) + // Minimum execution time: 24_533_000 picoseconds. + Weight::from_parts(25_731_000, 3593) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Treasury Proposals (r:1 w:0) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Treasury::Proposals` (r:1 w:0) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 99]`. fn approve_proposal(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `483 + p * (9 ±0)` + // Measured: `504 + p * (8 ±0)` // Estimated: `3573` - // Minimum execution time: 111_000_000 picoseconds. - Weight::from_parts(108_813_243, 3573) - // Standard Error: 147_887 - .saturating_add(Weight::from_parts(683_216, 0).saturating_mul(p.into())) + // Minimum execution time: 8_245_000 picoseconds. 
+ Weight::from_parts(11_300_222, 3573) + // Standard Error: 1_080 + .saturating_add(Weight::from_parts(71_375, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn remove_approval() -> Weight { // Proof Size summary in bytes: // Measured: `161` // Estimated: `1887` - // Minimum execution time: 71_000_000 picoseconds. - Weight::from_parts(78_000_000, 1887) + // Minimum execution time: 6_231_000 picoseconds. + Weight::from_parts(6_517_000, 1887) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Treasury Deactivated (r:1 w:1) - /// Proof: Treasury Deactivated (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:99 w:99) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: System Account (r:198 w:198) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyApprovals (r:1 w:1) - /// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Treasury::Deactivated` (r:1 w:1) + /// Proof: `Treasury::Deactivated` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:99 w:99) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:198 w:198) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 99]`. fn on_initialize_proposals(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `427 + p * (251 ±0)` + // Measured: `451 + p * (251 ±0)` // Estimated: `1887 + p * (5206 ±0)` - // Minimum execution time: 614_000_000 picoseconds. - Weight::from_parts(498_501_558, 1887) - // Standard Error: 1_070_260 - .saturating_add(Weight::from_parts(599_011_690, 0).saturating_mul(p.into())) + // Minimum execution time: 30_729_000 picoseconds. 
+ Weight::from_parts(37_861_389, 1887) + // Standard Error: 18_741 + .saturating_add(Weight::from_parts(31_377_118, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(p.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 5206).saturating_mul(p.into())) } - /// Storage: AssetRate ConversionRateToNative (r:1 w:0) - /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen) - /// Storage: Treasury SpendCount (r:1 w:1) - /// Proof: Treasury SpendCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Spends (r:0 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:0) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) + /// Storage: `Treasury::SpendCount` (r:1 w:1) + /// Proof: `Treasury::SpendCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Spends` (r:0 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn spend() -> Weight { // Proof Size summary in bytes: // Measured: `140` // Estimated: `3501` - // Minimum execution time: 214_000_000 picoseconds. - Weight::from_parts(216_000_000, 3501) + // Minimum execution time: 13_700_000 picoseconds. + Weight::from_parts(14_519_000, 3501) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Treasury Spends (r:1 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:2 w:2) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn payout() -> Weight { // Proof Size summary in bytes: - // Measured: `705` + // Measured: `709` // Estimated: `6208` - // Minimum execution time: 760_000_000 picoseconds. - Weight::from_parts(822_000_000, 6208) + // Minimum execution time: 54_888_000 picoseconds. 
+ Weight::from_parts(55_966_000, 6208) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Treasury Spends (r:1 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn check_status() -> Weight { // Proof Size summary in bytes: - // Measured: `194` - // Estimated: `3534` - // Minimum execution time: 153_000_000 picoseconds. - Weight::from_parts(160_000_000, 3534) + // Measured: `198` + // Estimated: `3538` + // Minimum execution time: 11_802_000 picoseconds. + Weight::from_parts(12_421_000, 3538) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Treasury Spends (r:1 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn void_spend() -> Weight { // Proof Size summary in bytes: - // Measured: `194` - // Estimated: `3534` - // Minimum execution time: 147_000_000 picoseconds. - Weight::from_parts(181_000_000, 3534) + // Measured: `198` + // Estimated: `3538` + // Minimum execution time: 10_571_000 picoseconds. + Weight::from_parts(11_052_000, 3538) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Treasury ProposalCount (r:1 w:1) - /// Proof: Treasury ProposalCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:0 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) + /// Storage: `Treasury::ProposalCount` (r:1 w:1) + /// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:0 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) fn spend_local() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1887` - // Minimum execution time: 179_000_000 picoseconds. - Weight::from_parts(190_000_000, 1887) + // Minimum execution time: 11_686_000 picoseconds. 
+ Weight::from_parts(12_138_000, 1887) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Treasury ProposalCount (r:1 w:1) - /// Proof: Treasury ProposalCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:0 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) + /// Storage: `Treasury::ProposalCount` (r:1 w:1) + /// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:0 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) fn propose_spend() -> Weight { // Proof Size summary in bytes: // Measured: `177` // Estimated: `1489` - // Minimum execution time: 349_000_000 picoseconds. - Weight::from_parts(398_000_000, 1489) + // Minimum execution time: 23_773_000 picoseconds. + Weight::from_parts(24_801_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Treasury Proposals (r:1 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Treasury::Proposals` (r:1 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn reject_proposal() -> Weight { // Proof Size summary in bytes: // Measured: `335` // Estimated: `3593` - // Minimum execution time: 367_000_000 picoseconds. - Weight::from_parts(388_000_000, 3593) + // Minimum execution time: 24_533_000 picoseconds. + Weight::from_parts(25_731_000, 3593) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Treasury Proposals (r:1 w:0) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Treasury::Proposals` (r:1 w:0) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 99]`. fn approve_proposal(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `483 + p * (9 ±0)` + // Measured: `504 + p * (8 ±0)` // Estimated: `3573` - // Minimum execution time: 111_000_000 picoseconds. - Weight::from_parts(108_813_243, 3573) - // Standard Error: 147_887 - .saturating_add(Weight::from_parts(683_216, 0).saturating_mul(p.into())) + // Minimum execution time: 8_245_000 picoseconds. 
+ Weight::from_parts(11_300_222, 3573) + // Standard Error: 1_080 + .saturating_add(Weight::from_parts(71_375, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn remove_approval() -> Weight { // Proof Size summary in bytes: // Measured: `161` // Estimated: `1887` - // Minimum execution time: 71_000_000 picoseconds. - Weight::from_parts(78_000_000, 1887) + // Minimum execution time: 6_231_000 picoseconds. + Weight::from_parts(6_517_000, 1887) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Treasury Deactivated (r:1 w:1) - /// Proof: Treasury Deactivated (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:99 w:99) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: System Account (r:198 w:198) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyApprovals (r:1 w:1) - /// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Treasury::Deactivated` (r:1 w:1) + /// Proof: `Treasury::Deactivated` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:99 w:99) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:198 w:198) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 99]`. fn on_initialize_proposals(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `427 + p * (251 ±0)` + // Measured: `451 + p * (251 ±0)` // Estimated: `1887 + p * (5206 ±0)` - // Minimum execution time: 614_000_000 picoseconds. - Weight::from_parts(498_501_558, 1887) - // Standard Error: 1_070_260 - .saturating_add(Weight::from_parts(599_011_690, 0).saturating_mul(p.into())) + // Minimum execution time: 30_729_000 picoseconds. 
+ Weight::from_parts(37_861_389, 1887) + // Standard Error: 18_741 + .saturating_add(Weight::from_parts(31_377_118, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(p.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 5206).saturating_mul(p.into())) } - /// Storage: AssetRate ConversionRateToNative (r:1 w:0) - /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen) - /// Storage: Treasury SpendCount (r:1 w:1) - /// Proof: Treasury SpendCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Spends (r:0 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:0) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) + /// Storage: `Treasury::SpendCount` (r:1 w:1) + /// Proof: `Treasury::SpendCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Spends` (r:0 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn spend() -> Weight { // Proof Size summary in bytes: // Measured: `140` // Estimated: `3501` - // Minimum execution time: 214_000_000 picoseconds. - Weight::from_parts(216_000_000, 3501) + // Minimum execution time: 13_700_000 picoseconds. + Weight::from_parts(14_519_000, 3501) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Treasury Spends (r:1 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:2 w:2) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn payout() -> Weight { // Proof Size summary in bytes: - // Measured: `705` + // Measured: `709` // Estimated: `6208` - // Minimum execution time: 760_000_000 picoseconds. - Weight::from_parts(822_000_000, 6208) + // Minimum execution time: 54_888_000 picoseconds. 
+ Weight::from_parts(55_966_000, 6208) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Treasury Spends (r:1 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn check_status() -> Weight { // Proof Size summary in bytes: - // Measured: `194` - // Estimated: `3534` - // Minimum execution time: 153_000_000 picoseconds. - Weight::from_parts(160_000_000, 3534) + // Measured: `198` + // Estimated: `3538` + // Minimum execution time: 11_802_000 picoseconds. + Weight::from_parts(12_421_000, 3538) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Treasury Spends (r:1 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn void_spend() -> Weight { // Proof Size summary in bytes: - // Measured: `194` - // Estimated: `3534` - // Minimum execution time: 147_000_000 picoseconds. - Weight::from_parts(181_000_000, 3534) + // Measured: `198` + // Estimated: `3538` + // Minimum execution time: 10_571_000 picoseconds. + Weight::from_parts(11_052_000, 3538) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/tx-pause/src/weights.rs b/substrate/frame/tx-pause/src/weights.rs index b733e64b159dc40872e27dd540ed40820e5cc78d..14fead12a8e11647c15714b2937f06da7c8b2f99 100644 --- a/substrate/frame/tx-pause/src/weights.rs +++ b/substrate/frame/tx-pause/src/weights.rs @@ -17,27 +17,29 @@ //! Autogenerated weights for `pallet_tx_pause` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-aahe6cbd-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_tx_pause +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=pallet_tx_pause -// --chain=dev -// --header=./HEADER-APACHE2 -// --output=./frame/tx-pause/src/weights.rs -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/tx-pause/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -62,8 +64,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3` // Estimated: `3997` - // Minimum execution time: 15_096_000 picoseconds. - Weight::from_parts(15_437_000, 3997) + // Minimum execution time: 12_014_000 picoseconds. + Weight::from_parts(12_980_000, 3997) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -73,8 +75,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `565` // Estimated: `3997` - // Minimum execution time: 21_546_000 picoseconds. - Weight::from_parts(22_178_000, 3997) + // Minimum execution time: 17_955_000 picoseconds. + Weight::from_parts(18_348_000, 3997) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -88,8 +90,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3` // Estimated: `3997` - // Minimum execution time: 15_096_000 picoseconds. - Weight::from_parts(15_437_000, 3997) + // Minimum execution time: 12_014_000 picoseconds. + Weight::from_parts(12_980_000, 3997) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -99,8 +101,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `565` // Estimated: `3997` - // Minimum execution time: 21_546_000 picoseconds. - Weight::from_parts(22_178_000, 3997) + // Minimum execution time: 17_955_000 picoseconds. + Weight::from_parts(18_348_000, 3997) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/uniques/src/weights.rs b/substrate/frame/uniques/src/weights.rs index eb80ee550a1db2d97aaed001f5041e49fdbca864..6c7b60b82e52e2caa9ac4536a33dda4df687c122 100644 --- a/substrate/frame/uniques/src/weights.rs +++ b/substrate/frame/uniques/src/weights.rs @@ -17,27 +17,29 @@ //! Autogenerated weights for `pallet_uniques` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-gghbxkbs-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_uniques +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=pallet_uniques -// --chain=dev -// --header=./HEADER-APACHE2 -// --output=./frame/uniques/src/weights.rs -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/uniques/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -88,8 +90,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `249` // Estimated: `3643` - // Minimum execution time: 31_393_000 picoseconds. - Weight::from_parts(32_933_000, 3643) + // Minimum execution time: 27_960_000 picoseconds. + Weight::from_parts(28_781_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -101,8 +103,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `109` // Estimated: `3643` - // Minimum execution time: 14_827_000 picoseconds. - Weight::from_parts(15_273_000, 3643) + // Minimum execution time: 11_970_000 picoseconds. + Weight::from_parts(12_512_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -111,13 +113,13 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Uniques::Asset` (r:1001 w:1000) /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) /// Storage: `Uniques::InstanceMetadataOf` (r:1000 w:1000) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) /// Storage: `Uniques::Attribute` (r:1000 w:1000) - /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(364), added: 2839, mode: `MaxEncodedLen`) + /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) /// Storage: `Uniques::ClassAccount` (r:0 w:1) /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// Storage: `Uniques::ClassMetadataOf` (r:0 w:1) - /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) /// Storage: `Uniques::Account` (r:0 w:1000) /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) /// Storage: `Uniques::CollectionMaxSupply` (r:0 w:1) @@ -128,15 +130,15 @@ impl WeightInfo for SubstrateWeight { fn destroy(n: u32, m: u32, a: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `418 + a * (107 ±0) + m * (56 ±0) + n * (76 ±0)` - // Estimated: `3643 + a * (2839 ±0) + m * (2583 ±0) + n * (2597 ±0)` - // Minimum execution time: 3_281_673_000 picoseconds. 
- Weight::from_parts(3_443_387_000, 3643) - // Standard Error: 41_937 - .saturating_add(Weight::from_parts(7_914_842, 0).saturating_mul(n.into())) - // Standard Error: 41_937 - .saturating_add(Weight::from_parts(519_960, 0).saturating_mul(m.into())) - // Standard Error: 41_937 - .saturating_add(Weight::from_parts(462_690, 0).saturating_mul(a.into())) + // Estimated: `3643 + a * (2647 ±0) + m * (2662 ±0) + n * (2597 ±0)` + // Minimum execution time: 2_979_858_000 picoseconds. + Weight::from_parts(3_007_268_000, 3643) + // Standard Error: 29_899 + .saturating_add(Weight::from_parts(6_943_612, 0).saturating_mul(n.into())) + // Standard Error: 29_899 + .saturating_add(Weight::from_parts(398_114, 0).saturating_mul(m.into())) + // Standard Error: 29_899 + .saturating_add(Weight::from_parts(369_941, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) @@ -145,8 +147,8 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) - .saturating_add(Weight::from_parts(0, 2839).saturating_mul(a.into())) - .saturating_add(Weight::from_parts(0, 2583).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 2647).saturating_mul(a.into())) + .saturating_add(Weight::from_parts(0, 2662).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 2597).saturating_mul(n.into())) } /// Storage: `Uniques::Asset` (r:1 w:1) @@ -161,8 +163,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 38_122_000 picoseconds. - Weight::from_parts(38_924_000, 3643) + // Minimum execution time: 32_733_000 picoseconds. + Weight::from_parts(33_487_000, 3643) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -178,8 +180,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 38_835_000 picoseconds. - Weight::from_parts(39_754_000, 3643) + // Minimum execution time: 34_202_000 picoseconds. + Weight::from_parts(35_146_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -195,8 +197,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 27_032_000 picoseconds. - Weight::from_parts(27_793_000, 3643) + // Minimum execution time: 24_285_000 picoseconds. + Weight::from_parts(25_300_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -209,10 +211,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `805 + i * (76 ±0)` // Estimated: `3643 + i * (2597 ±0)` - // Minimum execution time: 14_737_000 picoseconds. - Weight::from_parts(15_070_000, 3643) - // Standard Error: 22_500 - .saturating_add(Weight::from_parts(18_855_468, 0).saturating_mul(i.into())) + // Minimum execution time: 11_641_000 picoseconds. 
+ Weight::from_parts(12_261_000, 3643) + // Standard Error: 18_897 + .saturating_add(Weight::from_parts(15_263_570, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -227,8 +229,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 18_664_000 picoseconds. - Weight::from_parts(19_455_000, 3643) + // Minimum execution time: 16_122_000 picoseconds. + Weight::from_parts(16_627_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -240,8 +242,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 18_247_000 picoseconds. - Weight::from_parts(18_763_000, 3643) + // Minimum execution time: 15_824_000 picoseconds. + Weight::from_parts(16_522_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -251,8 +253,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 13_219_000 picoseconds. - Weight::from_parts(13_923_000, 3643) + // Minimum execution time: 10_802_000 picoseconds. + Weight::from_parts(11_206_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -262,8 +264,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 13_376_000 picoseconds. - Weight::from_parts(13_904_000, 3643) + // Minimum execution time: 10_805_000 picoseconds. + Weight::from_parts(11_340_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -271,16 +273,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Uniques::ClassAccount` (r:0 w:2) /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn transfer_ownership() -> Weight { // Proof Size summary in bytes: - // Measured: `423` + // Measured: `597` // Estimated: `3643` - // Minimum execution time: 22_353_000 picoseconds. - Weight::from_parts(23_222_000, 3643) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + // Minimum execution time: 24_213_000 picoseconds. + Weight::from_parts(25_547_000, 3643) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) } /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) @@ -288,8 +292,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 14_072_000 picoseconds. 
- Weight::from_parts(14_619_000, 3643) + // Minimum execution time: 11_256_000 picoseconds. + Weight::from_parts(11_585_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -301,90 +305,90 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 17_081_000 picoseconds. - Weight::from_parts(17_698_000, 3643) + // Minimum execution time: 14_616_000 picoseconds. + Weight::from_parts(15_064_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:0) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) /// Storage: `Uniques::Attribute` (r:1 w:1) - /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(364), added: 2839, mode: `MaxEncodedLen`) + /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) fn set_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `547` - // Estimated: `3829` - // Minimum execution time: 41_501_000 picoseconds. - Weight::from_parts(43_101_000, 3829) + // Measured: `626` + // Estimated: `3652` + // Minimum execution time: 35_640_000 picoseconds. + Weight::from_parts(37_007_000, 3652) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:0) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) /// Storage: `Uniques::Attribute` (r:1 w:1) - /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(364), added: 2839, mode: `MaxEncodedLen`) + /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) fn clear_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `936` - // Estimated: `3829` - // Minimum execution time: 39_722_000 picoseconds. - Weight::from_parts(40_390_000, 3829) + // Measured: `823` + // Estimated: `3652` + // Minimum execution time: 34_410_000 picoseconds. 
+ Weight::from_parts(35_431_000, 3652) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:1) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) fn set_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `415` - // Estimated: `3643` - // Minimum execution time: 30_726_000 picoseconds. - Weight::from_parts(31_557_000, 3643) + // Estimated: `3652` + // Minimum execution time: 26_187_000 picoseconds. + Weight::from_parts(27_837_000, 3652) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:1) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `547` - // Estimated: `3643` - // Minimum execution time: 31_303_000 picoseconds. - Weight::from_parts(32_389_000, 3643) + // Measured: `626` + // Estimated: `3652` + // Minimum execution time: 26_640_000 picoseconds. + Weight::from_parts(27_688_000, 3652) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) /// Storage: `Uniques::ClassMetadataOf` (r:1 w:1) - /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) fn set_collection_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 32_155_000 picoseconds. - Weight::from_parts(32_885_000, 3643) + // Minimum execution time: 26_781_000 picoseconds. + Weight::from_parts(27_628_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Uniques::Class` (r:1 w:0) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) /// Storage: `Uniques::ClassMetadataOf` (r:1 w:1) - /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) fn clear_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `461` + // Measured: `540` // Estimated: `3643` - // Minimum execution time: 30_044_000 picoseconds. - Weight::from_parts(31_405_000, 3643) + // Minimum execution time: 26_295_000 picoseconds. 
+ Weight::from_parts(26_903_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -396,8 +400,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 18_904_000 picoseconds. - Weight::from_parts(19_687_000, 3643) + // Minimum execution time: 16_678_000 picoseconds. + Weight::from_parts(17_548_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -409,8 +413,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `528` // Estimated: `3643` - // Minimum execution time: 19_144_000 picoseconds. - Weight::from_parts(19_706_000, 3643) + // Minimum execution time: 16_408_000 picoseconds. + Weight::from_parts(16_957_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -420,8 +424,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `109` // Estimated: `3517` - // Minimum execution time: 15_339_000 picoseconds. - Weight::from_parts(15_918_000, 3517) + // Minimum execution time: 12_568_000 picoseconds. + Weight::from_parts(12_960_000, 3517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -433,8 +437,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 15_387_000 picoseconds. - Weight::from_parts(15_726_000, 3643) + // Minimum execution time: 13_110_000 picoseconds. + Weight::from_parts(13_620_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -446,8 +450,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3587` - // Minimum execution time: 15_873_000 picoseconds. - Weight::from_parts(16_860_000, 3587) + // Minimum execution time: 13_568_000 picoseconds. + Weight::from_parts(14_211_000, 3587) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -463,8 +467,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `607` // Estimated: `3643` - // Minimum execution time: 37_245_000 picoseconds. - Weight::from_parts(38_383_000, 3643) + // Minimum execution time: 32_660_000 picoseconds. + Weight::from_parts(33_734_000, 3643) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -480,8 +484,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `249` // Estimated: `3643` - // Minimum execution time: 31_393_000 picoseconds. - Weight::from_parts(32_933_000, 3643) + // Minimum execution time: 27_960_000 picoseconds. + Weight::from_parts(28_781_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -493,8 +497,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `109` // Estimated: `3643` - // Minimum execution time: 14_827_000 picoseconds. - Weight::from_parts(15_273_000, 3643) + // Minimum execution time: 11_970_000 picoseconds. 
+ Weight::from_parts(12_512_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -503,13 +507,13 @@ impl WeightInfo for () { /// Storage: `Uniques::Asset` (r:1001 w:1000) /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) /// Storage: `Uniques::InstanceMetadataOf` (r:1000 w:1000) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) /// Storage: `Uniques::Attribute` (r:1000 w:1000) - /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(364), added: 2839, mode: `MaxEncodedLen`) + /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) /// Storage: `Uniques::ClassAccount` (r:0 w:1) /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// Storage: `Uniques::ClassMetadataOf` (r:0 w:1) - /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) /// Storage: `Uniques::Account` (r:0 w:1000) /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) /// Storage: `Uniques::CollectionMaxSupply` (r:0 w:1) @@ -520,15 +524,15 @@ impl WeightInfo for () { fn destroy(n: u32, m: u32, a: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `418 + a * (107 ±0) + m * (56 ±0) + n * (76 ±0)` - // Estimated: `3643 + a * (2839 ±0) + m * (2583 ±0) + n * (2597 ±0)` - // Minimum execution time: 3_281_673_000 picoseconds. - Weight::from_parts(3_443_387_000, 3643) - // Standard Error: 41_937 - .saturating_add(Weight::from_parts(7_914_842, 0).saturating_mul(n.into())) - // Standard Error: 41_937 - .saturating_add(Weight::from_parts(519_960, 0).saturating_mul(m.into())) - // Standard Error: 41_937 - .saturating_add(Weight::from_parts(462_690, 0).saturating_mul(a.into())) + // Estimated: `3643 + a * (2647 ±0) + m * (2662 ±0) + n * (2597 ±0)` + // Minimum execution time: 2_979_858_000 picoseconds. 
+ Weight::from_parts(3_007_268_000, 3643) + // Standard Error: 29_899 + .saturating_add(Weight::from_parts(6_943_612, 0).saturating_mul(n.into())) + // Standard Error: 29_899 + .saturating_add(Weight::from_parts(398_114, 0).saturating_mul(m.into())) + // Standard Error: 29_899 + .saturating_add(Weight::from_parts(369_941, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(m.into()))) @@ -537,8 +541,8 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(m.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(a.into()))) - .saturating_add(Weight::from_parts(0, 2839).saturating_mul(a.into())) - .saturating_add(Weight::from_parts(0, 2583).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 2647).saturating_mul(a.into())) + .saturating_add(Weight::from_parts(0, 2662).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 2597).saturating_mul(n.into())) } /// Storage: `Uniques::Asset` (r:1 w:1) @@ -553,8 +557,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 38_122_000 picoseconds. - Weight::from_parts(38_924_000, 3643) + // Minimum execution time: 32_733_000 picoseconds. + Weight::from_parts(33_487_000, 3643) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -570,8 +574,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 38_835_000 picoseconds. - Weight::from_parts(39_754_000, 3643) + // Minimum execution time: 34_202_000 picoseconds. + Weight::from_parts(35_146_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -587,8 +591,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 27_032_000 picoseconds. - Weight::from_parts(27_793_000, 3643) + // Minimum execution time: 24_285_000 picoseconds. + Weight::from_parts(25_300_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -601,10 +605,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `805 + i * (76 ±0)` // Estimated: `3643 + i * (2597 ±0)` - // Minimum execution time: 14_737_000 picoseconds. - Weight::from_parts(15_070_000, 3643) - // Standard Error: 22_500 - .saturating_add(Weight::from_parts(18_855_468, 0).saturating_mul(i.into())) + // Minimum execution time: 11_641_000 picoseconds. + Weight::from_parts(12_261_000, 3643) + // Standard Error: 18_897 + .saturating_add(Weight::from_parts(15_263_570, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -619,8 +623,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 18_664_000 picoseconds. - Weight::from_parts(19_455_000, 3643) + // Minimum execution time: 16_122_000 picoseconds. 
+ Weight::from_parts(16_627_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -632,8 +636,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 18_247_000 picoseconds. - Weight::from_parts(18_763_000, 3643) + // Minimum execution time: 15_824_000 picoseconds. + Weight::from_parts(16_522_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -643,8 +647,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 13_219_000 picoseconds. - Weight::from_parts(13_923_000, 3643) + // Minimum execution time: 10_802_000 picoseconds. + Weight::from_parts(11_206_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -654,8 +658,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 13_376_000 picoseconds. - Weight::from_parts(13_904_000, 3643) + // Minimum execution time: 10_805_000 picoseconds. + Weight::from_parts(11_340_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -663,16 +667,18 @@ impl WeightInfo for () { /// Proof: `Uniques::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Uniques::ClassAccount` (r:0 w:2) /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn transfer_ownership() -> Weight { // Proof Size summary in bytes: - // Measured: `423` + // Measured: `597` // Estimated: `3643` - // Minimum execution time: 22_353_000 picoseconds. - Weight::from_parts(23_222_000, 3643) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + // Minimum execution time: 24_213_000 picoseconds. + Weight::from_parts(25_547_000, 3643) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) @@ -680,8 +686,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 14_072_000 picoseconds. - Weight::from_parts(14_619_000, 3643) + // Minimum execution time: 11_256_000 picoseconds. + Weight::from_parts(11_585_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -693,90 +699,90 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 17_081_000 picoseconds. - Weight::from_parts(17_698_000, 3643) + // Minimum execution time: 14_616_000 picoseconds. 
+ Weight::from_parts(15_064_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:0) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) /// Storage: `Uniques::Attribute` (r:1 w:1) - /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(364), added: 2839, mode: `MaxEncodedLen`) + /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) fn set_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `547` - // Estimated: `3829` - // Minimum execution time: 41_501_000 picoseconds. - Weight::from_parts(43_101_000, 3829) + // Measured: `626` + // Estimated: `3652` + // Minimum execution time: 35_640_000 picoseconds. + Weight::from_parts(37_007_000, 3652) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:0) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) /// Storage: `Uniques::Attribute` (r:1 w:1) - /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(364), added: 2839, mode: `MaxEncodedLen`) + /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) fn clear_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `936` - // Estimated: `3829` - // Minimum execution time: 39_722_000 picoseconds. - Weight::from_parts(40_390_000, 3829) + // Measured: `823` + // Estimated: `3652` + // Minimum execution time: 34_410_000 picoseconds. + Weight::from_parts(35_431_000, 3652) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:1) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) fn set_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `415` - // Estimated: `3643` - // Minimum execution time: 30_726_000 picoseconds. - Weight::from_parts(31_557_000, 3643) + // Estimated: `3652` + // Minimum execution time: 26_187_000 picoseconds. 
+ Weight::from_parts(27_837_000, 3652) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:1) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `547` - // Estimated: `3643` - // Minimum execution time: 31_303_000 picoseconds. - Weight::from_parts(32_389_000, 3643) + // Measured: `626` + // Estimated: `3652` + // Minimum execution time: 26_640_000 picoseconds. + Weight::from_parts(27_688_000, 3652) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) /// Storage: `Uniques::ClassMetadataOf` (r:1 w:1) - /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) fn set_collection_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 32_155_000 picoseconds. - Weight::from_parts(32_885_000, 3643) + // Minimum execution time: 26_781_000 picoseconds. + Weight::from_parts(27_628_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Uniques::Class` (r:1 w:0) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) /// Storage: `Uniques::ClassMetadataOf` (r:1 w:1) - /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) fn clear_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `461` + // Measured: `540` // Estimated: `3643` - // Minimum execution time: 30_044_000 picoseconds. - Weight::from_parts(31_405_000, 3643) + // Minimum execution time: 26_295_000 picoseconds. + Weight::from_parts(26_903_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -788,8 +794,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 18_904_000 picoseconds. - Weight::from_parts(19_687_000, 3643) + // Minimum execution time: 16_678_000 picoseconds. + Weight::from_parts(17_548_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -801,8 +807,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `528` // Estimated: `3643` - // Minimum execution time: 19_144_000 picoseconds. - Weight::from_parts(19_706_000, 3643) + // Minimum execution time: 16_408_000 picoseconds. 
+ Weight::from_parts(16_957_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -812,8 +818,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `109` // Estimated: `3517` - // Minimum execution time: 15_339_000 picoseconds. - Weight::from_parts(15_918_000, 3517) + // Minimum execution time: 12_568_000 picoseconds. + Weight::from_parts(12_960_000, 3517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -825,8 +831,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 15_387_000 picoseconds. - Weight::from_parts(15_726_000, 3643) + // Minimum execution time: 13_110_000 picoseconds. + Weight::from_parts(13_620_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -838,8 +844,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3587` - // Minimum execution time: 15_873_000 picoseconds. - Weight::from_parts(16_860_000, 3587) + // Minimum execution time: 13_568_000 picoseconds. + Weight::from_parts(14_211_000, 3587) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -855,8 +861,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `607` // Estimated: `3643` - // Minimum execution time: 37_245_000 picoseconds. - Weight::from_parts(38_383_000, 3643) + // Minimum execution time: 32_660_000 picoseconds. + Weight::from_parts(33_734_000, 3643) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } diff --git a/substrate/frame/utility/src/weights.rs b/substrate/frame/utility/src/weights.rs index 1a3ea6c1f7fc85df4e90a6e8552fc211a51f5405..60b1c48f0868febb7d62cf8865e2bc9a2a79f0b2 100644 --- a/substrate/frame/utility/src/weights.rs +++ b/substrate/frame/utility/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_utility +//! Autogenerated weights for `pallet_utility` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/utility/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/utility/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_utility. +/// Weight functions needed for `pallet_utility`. pub trait WeightInfo { fn batch(c: u32, ) -> Weight; fn as_derivative() -> Weight; @@ -59,99 +58,139 @@ pub trait WeightInfo { fn force_batch(c: u32, ) -> Weight; } -/// Weights for pallet_utility using the Substrate node and recommended hardware. +/// Weights for `pallet_utility` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `c` is `[0, 1000]`. fn batch(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_763_000 picoseconds. - Weight::from_parts(16_943_157, 0) - // Standard Error: 1_904 - .saturating_add(Weight::from_parts(4_653_855, 0).saturating_mul(c.into())) + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 5_143_000 picoseconds. + Weight::from_parts(11_552_299, 3997) + // Standard Error: 2_325 + .saturating_add(Weight::from_parts(4_708_951, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) } + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) fn as_derivative() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 5_149_000 picoseconds. - Weight::from_parts(5_268_000, 0) + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 9_103_000 picoseconds. + Weight::from_parts(9_549_000, 3997) + .saturating_add(T::DbWeight::get().reads(2_u64)) } + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `c` is `[0, 1000]`. fn batch_all(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_976_000 picoseconds. 
- Weight::from_parts(16_448_433, 0) - // Standard Error: 1_834 - .saturating_add(Weight::from_parts(4_796_983, 0).saturating_mul(c.into())) + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 5_147_000 picoseconds. + Weight::from_parts(15_698_086, 3997) + // Standard Error: 3_066 + .saturating_add(Weight::from_parts(4_809_412, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) } fn dispatch_as() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_102_000 picoseconds. - Weight::from_parts(9_353_000, 0) + // Minimum execution time: 6_683_000 picoseconds. + Weight::from_parts(7_047_000, 0) } + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `c` is `[0, 1000]`. fn force_batch(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_840_000 picoseconds. - Weight::from_parts(17_748_474, 0) - // Standard Error: 2_059 - .saturating_add(Weight::from_parts(4_630_079, 0).saturating_mul(c.into())) + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 4_982_000 picoseconds. + Weight::from_parts(14_474_780, 3997) + // Standard Error: 2_291 + .saturating_add(Weight::from_parts(4_632_643, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `c` is `[0, 1000]`. fn batch(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_763_000 picoseconds. - Weight::from_parts(16_943_157, 0) - // Standard Error: 1_904 - .saturating_add(Weight::from_parts(4_653_855, 0).saturating_mul(c.into())) + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 5_143_000 picoseconds. + Weight::from_parts(11_552_299, 3997) + // Standard Error: 2_325 + .saturating_add(Weight::from_parts(4_708_951, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) } + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) fn as_derivative() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 5_149_000 picoseconds. - Weight::from_parts(5_268_000, 0) + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 9_103_000 picoseconds. 
+ Weight::from_parts(9_549_000, 3997) + .saturating_add(RocksDbWeight::get().reads(2_u64)) } + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `c` is `[0, 1000]`. fn batch_all(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_976_000 picoseconds. - Weight::from_parts(16_448_433, 0) - // Standard Error: 1_834 - .saturating_add(Weight::from_parts(4_796_983, 0).saturating_mul(c.into())) + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 5_147_000 picoseconds. + Weight::from_parts(15_698_086, 3997) + // Standard Error: 3_066 + .saturating_add(Weight::from_parts(4_809_412, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) } fn dispatch_as() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_102_000 picoseconds. - Weight::from_parts(9_353_000, 0) + // Minimum execution time: 6_683_000 picoseconds. + Weight::from_parts(7_047_000, 0) } + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `c` is `[0, 1000]`. fn force_batch(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_840_000 picoseconds. - Weight::from_parts(17_748_474, 0) - // Standard Error: 2_059 - .saturating_add(Weight::from_parts(4_630_079, 0).saturating_mul(c.into())) + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 4_982_000 picoseconds. + Weight::from_parts(14_474_780, 3997) + // Standard Error: 2_291 + .saturating_add(Weight::from_parts(4_632_643, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) } } diff --git a/substrate/frame/vesting/src/weights.rs b/substrate/frame/vesting/src/weights.rs index ddc7db8fa61fbedef4c2629117459dc37c5f8bcd..d91c47ae195ab380b2fc18e560877e499d0093aa 100644 --- a/substrate/frame/vesting/src/weights.rs +++ b/substrate/frame/vesting/src/weights.rs @@ -17,26 +17,28 @@ //! Autogenerated weights for `pallet_vesting` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_vesting +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_vesting -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/vesting/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -75,12 +77,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `381 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 32_846_000 picoseconds. - Weight::from_parts(30_974_459, 4764) - // Standard Error: 1_755 - .saturating_add(Weight::from_parts(73_138, 0).saturating_mul(l.into())) - // Standard Error: 3_123 - .saturating_add(Weight::from_parts(82_417, 0).saturating_mul(s.into())) + // Minimum execution time: 31_890_000 picoseconds. + Weight::from_parts(31_128_476, 4764) + // Standard Error: 1_193 + .saturating_add(Weight::from_parts(63_760, 0).saturating_mul(l.into())) + // Standard Error: 2_124 + .saturating_add(Weight::from_parts(57_247, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -96,12 +98,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `381 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 34_360_000 picoseconds. - Weight::from_parts(34_964_080, 4764) - // Standard Error: 1_996 - .saturating_add(Weight::from_parts(48_024, 0).saturating_mul(l.into())) - // Standard Error: 3_552 - .saturating_add(Weight::from_parts(34_411, 0).saturating_mul(s.into())) + // Minimum execution time: 33_958_000 picoseconds. + Weight::from_parts(33_537_005, 4764) + // Standard Error: 1_354 + .saturating_add(Weight::from_parts(60_604, 0).saturating_mul(l.into())) + // Standard Error: 2_410 + .saturating_add(Weight::from_parts(51_378, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -119,12 +121,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `484 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 33_859_000 picoseconds. - Weight::from_parts(32_950_681, 4764) - // Standard Error: 1_745 - .saturating_add(Weight::from_parts(69_323, 0).saturating_mul(l.into())) - // Standard Error: 3_105 - .saturating_add(Weight::from_parts(86_370, 0).saturating_mul(s.into())) + // Minimum execution time: 33_243_000 picoseconds. + Weight::from_parts(32_552_805, 4764) + // Standard Error: 1_413 + .saturating_add(Weight::from_parts(67_137, 0).saturating_mul(l.into())) + // Standard Error: 2_515 + .saturating_add(Weight::from_parts(70_000, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -142,12 +144,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `484 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 36_239_000 picoseconds. 
- Weight::from_parts(36_459_756, 4764) - // Standard Error: 2_051 - .saturating_add(Weight::from_parts(56_670, 0).saturating_mul(l.into())) - // Standard Error: 3_650 - .saturating_add(Weight::from_parts(49_663, 0).saturating_mul(s.into())) + // Minimum execution time: 35_396_000 picoseconds. + Weight::from_parts(35_774_949, 4764) + // Standard Error: 1_404 + .saturating_add(Weight::from_parts(55_349, 0).saturating_mul(l.into())) + // Standard Error: 2_497 + .saturating_add(Weight::from_parts(39_228, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -165,12 +167,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `555 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 70_330_000 picoseconds. - Weight::from_parts(71_196_328, 4764) - // Standard Error: 2_923 - .saturating_add(Weight::from_parts(67_238, 0).saturating_mul(l.into())) - // Standard Error: 5_201 - .saturating_add(Weight::from_parts(89_102, 0).saturating_mul(s.into())) + // Minimum execution time: 68_441_000 picoseconds. + Weight::from_parts(68_961_970, 4764) + // Standard Error: 2_509 + .saturating_add(Weight::from_parts(83_345, 0).saturating_mul(l.into())) + // Standard Error: 4_464 + .saturating_add(Weight::from_parts(124_158, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -188,12 +190,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `658 + l * (25 ±0) + s * (36 ±0)` // Estimated: `6196` - // Minimum execution time: 70_235_000 picoseconds. - Weight::from_parts(71_960_020, 6196) - // Standard Error: 2_493 - .saturating_add(Weight::from_parts(64_835, 0).saturating_mul(l.into())) - // Standard Error: 4_436 - .saturating_add(Weight::from_parts(102_159, 0).saturating_mul(s.into())) + // Minimum execution time: 70_190_000 picoseconds. + Weight::from_parts(72_355_984, 6196) + // Standard Error: 2_295 + .saturating_add(Weight::from_parts(59_968, 0).saturating_mul(l.into())) + // Standard Error: 4_084 + .saturating_add(Weight::from_parts(87_090, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -211,12 +213,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `482 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 34_352_000 picoseconds. - Weight::from_parts(33_697_027, 4764) - // Standard Error: 2_008 - .saturating_add(Weight::from_parts(79_270, 0).saturating_mul(l.into())) - // Standard Error: 3_710 - .saturating_add(Weight::from_parts(60_691, 0).saturating_mul(s.into())) + // Minimum execution time: 33_911_000 picoseconds. + Weight::from_parts(33_243_006, 4764) + // Standard Error: 1_199 + .saturating_add(Weight::from_parts(70_586, 0).saturating_mul(l.into())) + // Standard Error: 2_214 + .saturating_add(Weight::from_parts(60_985, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -234,12 +236,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `482 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 37_467_000 picoseconds. 
- Weight::from_parts(36_866_847, 4764) - // Standard Error: 1_692 - .saturating_add(Weight::from_parts(57_882, 0).saturating_mul(l.into())) - // Standard Error: 3_124 - .saturating_add(Weight::from_parts(80_266, 0).saturating_mul(s.into())) + // Minimum execution time: 36_242_000 picoseconds. + Weight::from_parts(35_812_994, 4764) + // Standard Error: 1_351 + .saturating_add(Weight::from_parts(68_256, 0).saturating_mul(l.into())) + // Standard Error: 2_496 + .saturating_add(Weight::from_parts(66_171, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -257,12 +259,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `555 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 41_497_000 picoseconds. - Weight::from_parts(38_763_834, 4764) - // Standard Error: 2_030 - .saturating_add(Weight::from_parts(99_580, 0).saturating_mul(l.into())) - // Standard Error: 3_750 - .saturating_add(Weight::from_parts(132_188, 0).saturating_mul(s.into())) + // Minimum execution time: 37_817_000 picoseconds. + Weight::from_parts(37_397_127, 4764) + // Standard Error: 1_665 + .saturating_add(Weight::from_parts(72_049, 0).saturating_mul(l.into())) + // Standard Error: 3_075 + .saturating_add(Weight::from_parts(60_973, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -282,12 +284,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `381 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 32_846_000 picoseconds. - Weight::from_parts(30_974_459, 4764) - // Standard Error: 1_755 - .saturating_add(Weight::from_parts(73_138, 0).saturating_mul(l.into())) - // Standard Error: 3_123 - .saturating_add(Weight::from_parts(82_417, 0).saturating_mul(s.into())) + // Minimum execution time: 31_890_000 picoseconds. + Weight::from_parts(31_128_476, 4764) + // Standard Error: 1_193 + .saturating_add(Weight::from_parts(63_760, 0).saturating_mul(l.into())) + // Standard Error: 2_124 + .saturating_add(Weight::from_parts(57_247, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -303,12 +305,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `381 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 34_360_000 picoseconds. - Weight::from_parts(34_964_080, 4764) - // Standard Error: 1_996 - .saturating_add(Weight::from_parts(48_024, 0).saturating_mul(l.into())) - // Standard Error: 3_552 - .saturating_add(Weight::from_parts(34_411, 0).saturating_mul(s.into())) + // Minimum execution time: 33_958_000 picoseconds. + Weight::from_parts(33_537_005, 4764) + // Standard Error: 1_354 + .saturating_add(Weight::from_parts(60_604, 0).saturating_mul(l.into())) + // Standard Error: 2_410 + .saturating_add(Weight::from_parts(51_378, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -326,12 +328,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `484 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 33_859_000 picoseconds. 
- Weight::from_parts(32_950_681, 4764) - // Standard Error: 1_745 - .saturating_add(Weight::from_parts(69_323, 0).saturating_mul(l.into())) - // Standard Error: 3_105 - .saturating_add(Weight::from_parts(86_370, 0).saturating_mul(s.into())) + // Minimum execution time: 33_243_000 picoseconds. + Weight::from_parts(32_552_805, 4764) + // Standard Error: 1_413 + .saturating_add(Weight::from_parts(67_137, 0).saturating_mul(l.into())) + // Standard Error: 2_515 + .saturating_add(Weight::from_parts(70_000, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -349,12 +351,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `484 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 36_239_000 picoseconds. - Weight::from_parts(36_459_756, 4764) - // Standard Error: 2_051 - .saturating_add(Weight::from_parts(56_670, 0).saturating_mul(l.into())) - // Standard Error: 3_650 - .saturating_add(Weight::from_parts(49_663, 0).saturating_mul(s.into())) + // Minimum execution time: 35_396_000 picoseconds. + Weight::from_parts(35_774_949, 4764) + // Standard Error: 1_404 + .saturating_add(Weight::from_parts(55_349, 0).saturating_mul(l.into())) + // Standard Error: 2_497 + .saturating_add(Weight::from_parts(39_228, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -372,12 +374,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `555 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 70_330_000 picoseconds. - Weight::from_parts(71_196_328, 4764) - // Standard Error: 2_923 - .saturating_add(Weight::from_parts(67_238, 0).saturating_mul(l.into())) - // Standard Error: 5_201 - .saturating_add(Weight::from_parts(89_102, 0).saturating_mul(s.into())) + // Minimum execution time: 68_441_000 picoseconds. + Weight::from_parts(68_961_970, 4764) + // Standard Error: 2_509 + .saturating_add(Weight::from_parts(83_345, 0).saturating_mul(l.into())) + // Standard Error: 4_464 + .saturating_add(Weight::from_parts(124_158, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -395,12 +397,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `658 + l * (25 ±0) + s * (36 ±0)` // Estimated: `6196` - // Minimum execution time: 70_235_000 picoseconds. - Weight::from_parts(71_960_020, 6196) - // Standard Error: 2_493 - .saturating_add(Weight::from_parts(64_835, 0).saturating_mul(l.into())) - // Standard Error: 4_436 - .saturating_add(Weight::from_parts(102_159, 0).saturating_mul(s.into())) + // Minimum execution time: 70_190_000 picoseconds. + Weight::from_parts(72_355_984, 6196) + // Standard Error: 2_295 + .saturating_add(Weight::from_parts(59_968, 0).saturating_mul(l.into())) + // Standard Error: 4_084 + .saturating_add(Weight::from_parts(87_090, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -418,12 +420,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `482 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 34_352_000 picoseconds. 
- Weight::from_parts(33_697_027, 4764) - // Standard Error: 2_008 - .saturating_add(Weight::from_parts(79_270, 0).saturating_mul(l.into())) - // Standard Error: 3_710 - .saturating_add(Weight::from_parts(60_691, 0).saturating_mul(s.into())) + // Minimum execution time: 33_911_000 picoseconds. + Weight::from_parts(33_243_006, 4764) + // Standard Error: 1_199 + .saturating_add(Weight::from_parts(70_586, 0).saturating_mul(l.into())) + // Standard Error: 2_214 + .saturating_add(Weight::from_parts(60_985, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -441,12 +443,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `482 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 37_467_000 picoseconds. - Weight::from_parts(36_866_847, 4764) - // Standard Error: 1_692 - .saturating_add(Weight::from_parts(57_882, 0).saturating_mul(l.into())) - // Standard Error: 3_124 - .saturating_add(Weight::from_parts(80_266, 0).saturating_mul(s.into())) + // Minimum execution time: 36_242_000 picoseconds. + Weight::from_parts(35_812_994, 4764) + // Standard Error: 1_351 + .saturating_add(Weight::from_parts(68_256, 0).saturating_mul(l.into())) + // Standard Error: 2_496 + .saturating_add(Weight::from_parts(66_171, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -464,12 +466,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `555 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 41_497_000 picoseconds. - Weight::from_parts(38_763_834, 4764) - // Standard Error: 2_030 - .saturating_add(Weight::from_parts(99_580, 0).saturating_mul(l.into())) - // Standard Error: 3_750 - .saturating_add(Weight::from_parts(132_188, 0).saturating_mul(s.into())) + // Minimum execution time: 37_817_000 picoseconds. + Weight::from_parts(37_397_127, 4764) + // Standard Error: 1_665 + .saturating_add(Weight::from_parts(72_049, 0).saturating_mul(l.into())) + // Standard Error: 3_075 + .saturating_add(Weight::from_parts(60_973, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } diff --git a/substrate/frame/whitelist/src/weights.rs b/substrate/frame/whitelist/src/weights.rs index de42c5a5841cf8c2d3aea1227dafd35ec461b234..13b653f6ca7d367b83d5771f4805a61d8a7e40fd 100644 --- a/substrate/frame/whitelist/src/weights.rs +++ b/substrate/frame/whitelist/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_whitelist +//! Autogenerated weights for `pallet_whitelist` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/whitelist/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/whitelist/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_whitelist. +/// Weight functions needed for `pallet_whitelist`. pub trait WeightInfo { fn whitelist_call() -> Weight; fn remove_whitelisted_call() -> Weight; @@ -58,133 +57,149 @@ pub trait WeightInfo { fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight; } -/// Weights for pallet_whitelist using the Substrate node and recommended hardware. +/// Weights for `pallet_whitelist` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Whitelist WhitelistedCall (r:1 w:1) - /// Proof: Whitelist WhitelistedCall (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn whitelist_call() -> Weight { // Proof Size summary in bytes: - // Measured: `217` + // Measured: `317` // Estimated: `3556` - // Minimum execution time: 19_914_000 picoseconds. - Weight::from_parts(20_892_000, 3556) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Minimum execution time: 18_503_000 picoseconds. 
+ Weight::from_parts(19_497_000, 3556) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Whitelist WhitelistedCall (r:1 w:1) - /// Proof: Whitelist WhitelistedCall (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn remove_whitelisted_call() -> Weight { // Proof Size summary in bytes: - // Measured: `346` + // Measured: `446` // Estimated: `3556` - // Minimum execution time: 18_142_000 picoseconds. - Weight::from_parts(18_529_000, 3556) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Minimum execution time: 17_633_000 picoseconds. + Weight::from_parts(18_196_000, 3556) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Whitelist WhitelistedCall (r:1 w:1) - /// Proof: Whitelist WhitelistedCall (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:1 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: Measured) - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:1 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 4194294]`. fn dispatch_whitelisted_call(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `422 + n * (1 ±0)` - // Estimated: `3886 + n * (1 ±0)` - // Minimum execution time: 30_671_000 picoseconds. - Weight::from_parts(31_197_000, 3886) + // Measured: `522 + n * (1 ±0)` + // Estimated: `3986 + n * (1 ±0)` + // Minimum execution time: 28_020_000 picoseconds. 
+ Weight::from_parts(28_991_000, 3986) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_163, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(Weight::from_parts(1_171, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } - /// Storage: Whitelist WhitelistedCall (r:1 w:1) - /// Proof: Whitelist WhitelistedCall (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 10000]`. fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `346` + // Measured: `446` // Estimated: `3556` - // Minimum execution time: 22_099_000 picoseconds. - Weight::from_parts(23_145_477, 3556) - // Standard Error: 5 - .saturating_add(Weight::from_parts(1_422, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Minimum execution time: 21_916_000 picoseconds. + Weight::from_parts(23_082_065, 3556) + // Standard Error: 6 + .saturating_add(Weight::from_parts(1_388, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Whitelist WhitelistedCall (r:1 w:1) - /// Proof: Whitelist WhitelistedCall (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn whitelist_call() -> Weight { // Proof Size summary in bytes: - // Measured: `217` + // Measured: `317` // Estimated: `3556` - // Minimum execution time: 19_914_000 picoseconds. - Weight::from_parts(20_892_000, 3556) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Minimum execution time: 18_503_000 picoseconds. 
+ Weight::from_parts(19_497_000, 3556) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Whitelist WhitelistedCall (r:1 w:1) - /// Proof: Whitelist WhitelistedCall (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn remove_whitelisted_call() -> Weight { // Proof Size summary in bytes: - // Measured: `346` + // Measured: `446` // Estimated: `3556` - // Minimum execution time: 18_142_000 picoseconds. - Weight::from_parts(18_529_000, 3556) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Minimum execution time: 17_633_000 picoseconds. + Weight::from_parts(18_196_000, 3556) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Whitelist WhitelistedCall (r:1 w:1) - /// Proof: Whitelist WhitelistedCall (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:1 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: Measured) - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:1 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 4194294]`. fn dispatch_whitelisted_call(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `422 + n * (1 ±0)` - // Estimated: `3886 + n * (1 ±0)` - // Minimum execution time: 30_671_000 picoseconds. - Weight::from_parts(31_197_000, 3886) + // Measured: `522 + n * (1 ±0)` + // Estimated: `3986 + n * (1 ±0)` + // Minimum execution time: 28_020_000 picoseconds. 
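// A minimal, self-contained sketch (not part of this patch) of how the
// auto-generated weight formulas in the hunks above compose. `Weight`, the
// per-read and per-write costs, and the DB constants below are simplified
// stand-ins for `sp_weights::Weight` and `RocksDbWeight`; the base and slope
// numbers are taken from one of the regenerated vesting formulas above. Only
// the arithmetic pattern matters here: a fitted base weight, plus one linear
// slope per benchmark component, plus a fixed number of storage reads and
// writes, all combined with saturating arithmetic.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Weight {
    ref_time: u64,   // picoseconds of benchmarked execution time
    proof_size: u64, // bytes of proof (PoV) size
}

impl Weight {
    const fn from_parts(ref_time: u64, proof_size: u64) -> Self {
        Self { ref_time, proof_size }
    }
    fn saturating_add(self, rhs: Self) -> Self {
        Self {
            ref_time: self.ref_time.saturating_add(rhs.ref_time),
            proof_size: self.proof_size.saturating_add(rhs.proof_size),
        }
    }
    fn saturating_mul(self, scalar: u64) -> Self {
        Self {
            ref_time: self.ref_time.saturating_mul(scalar),
            proof_size: self.proof_size.saturating_mul(scalar),
        }
    }
}

// Hypothetical per-operation DB costs, standing in for `RocksDbWeight::get()`.
const DB_READ: Weight = Weight::from_parts(25_000_000, 0);
const DB_WRITE: Weight = Weight::from_parts(100_000_000, 0);

/// Mirrors the shape of the regenerated formulas: base + slope_l * l +
/// slope_s * s + 4 reads + 3 writes.
fn example_weight(l: u64, s: u64) -> Weight {
    Weight::from_parts(35_774_949, 4764)
        .saturating_add(Weight::from_parts(55_349, 0).saturating_mul(l))
        .saturating_add(Weight::from_parts(39_228, 0).saturating_mul(s))
        .saturating_add(DB_READ.saturating_mul(4))
        .saturating_add(DB_WRITE.saturating_mul(3))
}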
+ Weight::from_parts(28_991_000, 3986) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_163, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(Weight::from_parts(1_171, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } - /// Storage: Whitelist WhitelistedCall (r:1 w:1) - /// Proof: Whitelist WhitelistedCall (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 10000]`. fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `346` + // Measured: `446` // Estimated: `3556` - // Minimum execution time: 22_099_000 picoseconds. - Weight::from_parts(23_145_477, 3556) - // Standard Error: 5 - .saturating_add(Weight::from_parts(1_422, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Minimum execution time: 21_916_000 picoseconds. + Weight::from_parts(23_082_065, 3556) + // Standard Error: 6 + .saturating_add(Weight::from_parts(1_388, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } } diff --git a/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs index 2b1e65ec88524a0236a464005fb2d1215c1157fe..e34e4c0e7672164afb41702ab3bae87082df9062 100644 --- a/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -456,6 +456,7 @@ impl<'a> ToClientSideDecl<'a> { |err| #crate_::ApiError::FailedToDecodeReturnValue { function: #function_name, error: err, + raw: r.clone(), } ) ) diff --git a/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index c1339ff6621b389e32c2f9ca2a7f19079c2d269e..1761e0ac9dbf450c15adae2659119c918f300334 100644 --- a/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -158,7 +158,7 @@ fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result::Hash, _: &<#block_type as #crate_::BlockT>::Header, - ) -> std::result::Result<(), #crate_::ApiError> { + ) -> std::result::Result<#crate_::__private::ExtrinsicInclusionMode, #crate_::ApiError> { unimplemented!("`Core::initialize_block` not implemented for runtime api mocks") } } diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs index 190de1ab3fdee0611f27071b9c769a164c3aca9d..a945b9f21f3cff60d6a9d5e24aa07704a1dc8d31 100644 --- a/substrate/primitives/api/src/lib.rs +++ 
b/substrate/primitives/api/src/lib.rs @@ -101,7 +101,7 @@ pub mod __private { generic::BlockId, traits::{Block as BlockT, Hash as HashT, HashingFor, Header as HeaderT, NumberFor}, transaction_validity::TransactionValidity, - RuntimeString, TransactionOutcome, + ExtrinsicInclusionMode, RuntimeString, TransactionOutcome, }; pub use sp_std::{mem, slice, vec}; pub use sp_version::{create_apis_vec, ApiId, ApisVec, RuntimeVersion}; @@ -115,11 +115,11 @@ pub use sp_core::traits::CallContext; use sp_core::OpaqueMetadata; #[cfg(feature = "std")] use sp_externalities::{Extension, Extensions}; -use sp_runtime::traits::Block as BlockT; #[cfg(feature = "std")] use sp_runtime::traits::HashingFor; #[cfg(feature = "std")] pub use sp_runtime::TransactionOutcome; +use sp_runtime::{traits::Block as BlockT, ExtrinsicInclusionMode}; #[cfg(feature = "std")] pub use sp_state_machine::StorageProof; #[cfg(feature = "std")] @@ -280,7 +280,7 @@ pub use sp_api_proc_macro::decl_runtime_apis; /// ```rust /// use sp_version::create_runtime_str; /// # -/// # use sp_runtime::traits::Block as BlockT; +/// # use sp_runtime::{ExtrinsicInclusionMode, traits::Block as BlockT}; /// # use sp_test_primitives::Block; /// # /// # /// The declaration of the `Runtime` type is done by the `construct_runtime!` macro @@ -307,7 +307,9 @@ pub use sp_api_proc_macro::decl_runtime_apis; /// # unimplemented!() /// # } /// # fn execute_block(_block: Block) {} -/// # fn initialize_block(_header: &::Header) {} +/// # fn initialize_block(_header: &::Header) -> ExtrinsicInclusionMode { +/// # unimplemented!() +/// # } /// # } /// /// impl self::Balance for Runtime { @@ -540,11 +542,12 @@ pub fn init_runtime_logger() { #[cfg(feature = "std")] #[derive(Debug, thiserror::Error)] pub enum ApiError { - #[error("Failed to decode return value of {function}")] + #[error("Failed to decode return value of {function}: {error} raw data: {raw:?}")] FailedToDecodeReturnValue { function: &'static str, #[source] error: codec::Error, + raw: Vec, }, #[error("Failed to convert return value from runtime to node of {function}")] FailedToConvertReturnValue { @@ -800,15 +803,18 @@ pub fn deserialize_runtime_api_info(bytes: [u8; RUNTIME_API_INFO_SIZE]) -> ([u8; decl_runtime_apis! { /// The `Core` runtime api that every Substrate runtime needs to implement. #[core_trait] - #[api_version(4)] + #[api_version(5)] pub trait Core { /// Returns the version of the runtime. fn version() -> RuntimeVersion; /// Execute the given block. fn execute_block(block: Block); /// Initialize a block with the given header. + #[changed_in(5)] #[renamed("initialise_block", 2)] fn initialize_block(header: &::Header); + /// Initialize a block with the given header and return the runtime executive mode. + fn initialize_block(header: &::Header) -> ExtrinsicInclusionMode; } /// The `Metadata` api trait that returns metadata for the runtime. diff --git a/substrate/primitives/api/test/tests/decl_and_impl.rs b/substrate/primitives/api/test/tests/decl_and_impl.rs index d68470551d20061aa09d277a9999db81654ff47d..211a08561fd4bcf601d9508d96447fc07f82657f 100644 --- a/substrate/primitives/api/test/tests/decl_and_impl.rs +++ b/substrate/primitives/api/test/tests/decl_and_impl.rs @@ -139,7 +139,7 @@ impl_runtime_apis! 
{ fn execute_block(_: Block) { unimplemented!() } - fn initialize_block(_: &::Header) { + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.rs b/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.rs index 43718e4cd04a3f25fcc805edec3c14283f1f5f56..262a874213a5662c0c87a79989b620c661e707c7 100644 --- a/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.rs +++ b/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.rs @@ -40,7 +40,7 @@ sp_api::impl_runtime_apis! { fn execute_block(_: Block) { unimplemented!() } - fn initialize_block(_: &::Header) { + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/substrate/primitives/api/test/tests/ui/impl_missing_version.rs b/substrate/primitives/api/test/tests/ui/impl_missing_version.rs index 560257b5168c921ce112a56fcbd7e9c303363c77..58850ab343fbb7bcc2c04b01c7aec9cb881023d0 100644 --- a/substrate/primitives/api/test/tests/ui/impl_missing_version.rs +++ b/substrate/primitives/api/test/tests/ui/impl_missing_version.rs @@ -45,7 +45,7 @@ sp_api::impl_runtime_apis! { fn execute_block(_: Block) { unimplemented!() } - fn initialize_block(_: &::Header) { + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/substrate/primitives/api/test/tests/ui/missing_versioned_method.rs b/substrate/primitives/api/test/tests/ui/missing_versioned_method.rs index 6ead545f85a11e8f0382331e77145acb4a053d5e..70f75d065154afcb807e1712c95fc24eca5267b1 100644 --- a/substrate/primitives/api/test/tests/ui/missing_versioned_method.rs +++ b/substrate/primitives/api/test/tests/ui/missing_versioned_method.rs @@ -44,7 +44,7 @@ sp_api::impl_runtime_apis! { fn execute_block(_: Block) { unimplemented!() } - fn initialize_block(_: &::Header) { + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/substrate/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs b/substrate/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs index 8eebc1d79babcbd6fb95876a530a920397c035a6..63032000040b7d19f8e31fc4ac3221369aef9e7e 100644 --- a/substrate/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs +++ b/substrate/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs @@ -47,7 +47,7 @@ sp_api::impl_runtime_apis! { fn execute_block(_: Block) { unimplemented!() } - fn initialize_block(_: &::Header) { + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/substrate/primitives/api/test/tests/ui/positive_cases/custom_where_bound.rs b/substrate/primitives/api/test/tests/ui/positive_cases/custom_where_bound.rs index 594556d57be50dbd151de486dc22a63a733b933b..0858813bc99941be005e0c0141b2defc3a96a3d3 100644 --- a/substrate/primitives/api/test/tests/ui/positive_cases/custom_where_bound.rs +++ b/substrate/primitives/api/test/tests/ui/positive_cases/custom_where_bound.rs @@ -51,7 +51,7 @@ sp_api::impl_runtime_apis! 
{ fn execute_block(_: Block) { unimplemented!() } - fn initialize_block(_: &::Header) { + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/substrate/primitives/api/test/tests/ui/positive_cases/default_impls.rs b/substrate/primitives/api/test/tests/ui/positive_cases/default_impls.rs index ae573238ffe144d26b7e5dd4528e8a9e5ab88a9e..3e0cb79156c8bf5c218e29e3024d181a69fc4da6 100644 --- a/substrate/primitives/api/test/tests/ui/positive_cases/default_impls.rs +++ b/substrate/primitives/api/test/tests/ui/positive_cases/default_impls.rs @@ -46,7 +46,7 @@ sp_api::impl_runtime_apis! { fn execute_block(_: Block) { unimplemented!() } - fn initialize_block(_: &::Header) { + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.rs b/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.rs index 921bf0d04351dac4ddcdd18030619f1ebcf15008..b2caea7ab7e44d910628d0ac834017e66e7e5d2e 100644 --- a/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.rs +++ b/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.rs @@ -42,7 +42,7 @@ sp_api::impl_runtime_apis! { fn execute_block(_: Block) { unimplemented!() } - fn initialize_block(_: &::Header) { + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/substrate/primitives/consensus/babe/src/inherents.rs b/substrate/primitives/consensus/babe/src/inherents.rs index 909769f3031be658e758087c4323ed7ec80fdcce..54b7b64401673927602a3e816cc9d5dcb9ede14a 100644 --- a/substrate/primitives/consensus/babe/src/inherents.rs +++ b/substrate/primitives/consensus/babe/src/inherents.rs @@ -17,7 +17,6 @@ //! Inherents for BABE -use core::result::Result; use sp_inherents::{Error, InherentData, InherentIdentifier}; /// The BABE inherent identifier. diff --git a/substrate/primitives/genesis-builder/Cargo.toml b/substrate/primitives/genesis-builder/Cargo.toml index bf12433c5f4048c9ad4b6dca688ffa6ef0cd3a8e..15440b4811ec4664725f8b0df5335748917b20fc 100644 --- a/substrate/primitives/genesis-builder/Cargo.toml +++ b/substrate/primitives/genesis-builder/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true license = "Apache-2.0" homepage = "https://substrate.io" repository.workspace = true -description = "Substrate GenesisConfig builder API" +description = "Substrate RuntimeGenesisConfig builder API" readme = "README.md" [lints] diff --git a/substrate/primitives/genesis-builder/src/lib.rs b/substrate/primitives/genesis-builder/src/lib.rs index e002cd3aa6f704d8889fd35d04fdd65b46a1b754..bb1a2a352488ac0c03cfcd35dac1b45444b1d9a1 100644 --- a/substrate/primitives/genesis-builder/src/lib.rs +++ b/substrate/primitives/genesis-builder/src/lib.rs @@ -19,36 +19,37 @@ //! Substrate genesis config builder //! -//! This Runtime API allows to construct `GenesisConfig`, in particular: -//! - serialize the runtime default `GenesisConfig` struct into json format, -//! - put the GenesisConfig struct into the storage. Internally this operation calls +//! This Runtime API allows to construct `RuntimeGenesisConfig`, in particular: +//! - serialize the runtime default `RuntimeGenesisConfig` struct into json format, +//! - put the RuntimeGenesisConfig struct into the storage. Internally this operation calls //! 
`GenesisBuild::build` function for all runtime pallets, which is typically provided by //! pallet's author. -//! - deserialize the `GenesisConfig` from given json blob and put `GenesisConfig` into the state -//! storage. Allows to build customized configuration. +//! - deserialize the `RuntimeGenesisConfig` from given json blob and put `RuntimeGenesisConfig` +//! into the state storage. Allows to build customized configuration. //! -//! Providing externalities with empty storage and putting `GenesisConfig` into storage allows to -//! catch and build the raw storage of `GenesisConfig` which is the foundation for genesis block. +//! Providing externalities with empty storage and putting `RuntimeGenesisConfig` into storage +//! allows to catch and build the raw storage of `RuntimeGenesisConfig` which is the foundation for +//! genesis block. /// The result type alias, used in build methods. `Err` contains formatted error message. pub type Result = core::result::Result<(), sp_runtime::RuntimeString>; sp_api::decl_runtime_apis! { - /// API to interact with GenesisConfig for the runtime + /// API to interact with RuntimeGenesisConfig for the runtime pub trait GenesisBuilder { - /// Creates the default `GenesisConfig` and returns it as a JSON blob. + /// Creates the default `RuntimeGenesisConfig` and returns it as a JSON blob. /// - /// This function instantiates the default `GenesisConfig` struct for the runtime and serializes it into a JSON - /// blob. It returns a `Vec` containing the JSON representation of the default `GenesisConfig`. + /// This function instantiates the default `RuntimeGenesisConfig` struct for the runtime and serializes it into a JSON + /// blob. It returns a `Vec` containing the JSON representation of the default `RuntimeGenesisConfig`. fn create_default_config() -> sp_std::vec::Vec; - /// Build `GenesisConfig` from a JSON blob not using any defaults and store it in the storage. + /// Build `RuntimeGenesisConfig` from a JSON blob not using any defaults and store it in the storage. /// - /// This function deserializes the full `GenesisConfig` from the given JSON blob and puts it into the storage. + /// This function deserializes the full `RuntimeGenesisConfig` from the given JSON blob and puts it into the storage. /// If the provided JSON blob is incorrect or incomplete or the deserialization fails, an error is returned. /// It is recommended to log any errors encountered during the process. /// - /// Please note that provided json blob must contain all `GenesisConfig` fields, no defaults will be used. + /// Please note that provided json blob must contain all `RuntimeGenesisConfig` fields, no defaults will be used. fn build_config(json: sp_std::vec::Vec) -> Result; } } diff --git a/substrate/primitives/inherents/src/lib.rs b/substrate/primitives/inherents/src/lib.rs index dd7c294f1e245126bd02ef14a470595dc13841bb..415319a6849c97f9a0c89b663bb4248a50d5b7dd 100644 --- a/substrate/primitives/inherents/src/lib.rs +++ b/substrate/primitives/inherents/src/lib.rs @@ -98,10 +98,10 @@ //! and production. //! //! ``` -//! # use sp_runtime::testing::ExtrinsicWrapper; +//! # use sp_runtime::{generic::UncheckedExtrinsic, testing::MockCallU64}; //! # use sp_inherents::{InherentIdentifier, InherentData}; //! # use futures::FutureExt; -//! # type Block = sp_runtime::testing::Block>; +//! # type Block = sp_runtime::testing::Block>; //! # const INHERENT_IDENTIFIER: InherentIdentifier = *b"testinh0"; //! # struct InherentDataProvider; //! 
# #[async_trait::async_trait] diff --git a/substrate/primitives/metadata-ir/src/lib.rs b/substrate/primitives/metadata-ir/src/lib.rs index edfa58f8618942e34b6937733cbd2a5f5c4dffbb..66aff2c599fc9438c2e42bcd3c409d8fbabe6dca 100644 --- a/substrate/primitives/metadata-ir/src/lib.rs +++ b/substrate/primitives/metadata-ir/src/lib.rs @@ -84,7 +84,7 @@ mod test { call_ty: meta_type::<()>(), signature_ty: meta_type::<()>(), extra_ty: meta_type::<()>(), - signed_extensions: vec![], + extensions: vec![], }, ty: meta_type::<()>(), apis: vec![], diff --git a/substrate/primitives/metadata-ir/src/types.rs b/substrate/primitives/metadata-ir/src/types.rs index b107d20a8e2bfbf55d0d558bb60346cb7a52dd70..e60881ed6b8d86df2bc0b92c744b569168e3f75d 100644 --- a/substrate/primitives/metadata-ir/src/types.rs +++ b/substrate/primitives/metadata-ir/src/types.rs @@ -166,10 +166,11 @@ pub struct ExtrinsicMetadataIR { pub call_ty: T::Type, /// The type of the extrinsic's signature. pub signature_ty: T::Type, - /// The type of the outermost Extra enum. + /// The type of the outermost Extra/Extensions enum. + // TODO: metadata-v16: rename this to `extension_ty`. pub extra_ty: T::Type, - /// The signed extensions in the order they appear in the extrinsic. - pub signed_extensions: Vec>, + /// The transaction extensions in the order they appear in the extrinsic. + pub extensions: Vec>, } impl IntoPortable for ExtrinsicMetadataIR { @@ -183,27 +184,27 @@ impl IntoPortable for ExtrinsicMetadataIR { call_ty: registry.register_type(&self.call_ty), signature_ty: registry.register_type(&self.signature_ty), extra_ty: registry.register_type(&self.extra_ty), - signed_extensions: registry.map_into_portable(self.signed_extensions), + extensions: registry.map_into_portable(self.extensions), } } } /// Metadata of an extrinsic's signed extension. #[derive(Clone, PartialEq, Eq, Encode, Debug)] -pub struct SignedExtensionMetadataIR { +pub struct TransactionExtensionMetadataIR { /// The unique signed extension identifier, which may be different from the type name. pub identifier: T::String, /// The type of the signed extension, with the data to be included in the extrinsic. pub ty: T::Type, - /// The type of the additional signed data, with the data to be included in the signed payload + /// The type of the additional signed data, with the data to be included in the signed payload. 
pub additional_signed: T::Type, } -impl IntoPortable for SignedExtensionMetadataIR { - type Output = SignedExtensionMetadataIR; +impl IntoPortable for TransactionExtensionMetadataIR { + type Output = TransactionExtensionMetadataIR; fn into_portable(self, registry: &mut Registry) -> Self::Output { - SignedExtensionMetadataIR { + TransactionExtensionMetadataIR { identifier: self.identifier.into_portable(registry), ty: registry.register_type(&self.ty), additional_signed: registry.register_type(&self.additional_signed), diff --git a/substrate/primitives/metadata-ir/src/v14.rs b/substrate/primitives/metadata-ir/src/v14.rs index e1b7a24f76577c09f7f8c8040ae06bf433f9126d..ec08de3478622421214bc40f7ea4a5b1dca6cadc 100644 --- a/substrate/primitives/metadata-ir/src/v14.rs +++ b/substrate/primitives/metadata-ir/src/v14.rs @@ -20,8 +20,8 @@ use super::types::{ ExtrinsicMetadataIR, MetadataIR, PalletCallMetadataIR, PalletConstantMetadataIR, PalletErrorMetadataIR, PalletEventMetadataIR, PalletMetadataIR, PalletStorageMetadataIR, - SignedExtensionMetadataIR, StorageEntryMetadataIR, StorageEntryModifierIR, StorageEntryTypeIR, - StorageHasherIR, + StorageEntryMetadataIR, StorageEntryModifierIR, StorageEntryTypeIR, StorageHasherIR, + TransactionExtensionMetadataIR, }; use frame_metadata::v14::{ @@ -137,8 +137,8 @@ impl From for PalletErrorMetadata { } } -impl From for SignedExtensionMetadata { - fn from(ir: SignedExtensionMetadataIR) -> Self { +impl From for SignedExtensionMetadata { + fn from(ir: TransactionExtensionMetadataIR) -> Self { SignedExtensionMetadata { identifier: ir.identifier, ty: ir.ty, @@ -152,7 +152,7 @@ impl From for ExtrinsicMetadata { ExtrinsicMetadata { ty: ir.ty, version: ir.version, - signed_extensions: ir.signed_extensions.into_iter().map(Into::into).collect(), + signed_extensions: ir.extensions.into_iter().map(Into::into).collect(), } } } diff --git a/substrate/primitives/metadata-ir/src/v15.rs b/substrate/primitives/metadata-ir/src/v15.rs index a942eb73223b2c80e2d71ef1cb070c45a2ca588f..dc5ce1c88fdfa5b32d4304a0e164c98b450f5005 100644 --- a/substrate/primitives/metadata-ir/src/v15.rs +++ b/substrate/primitives/metadata-ir/src/v15.rs @@ -21,7 +21,7 @@ use crate::OuterEnumsIR; use super::types::{ ExtrinsicMetadataIR, MetadataIR, PalletMetadataIR, RuntimeApiMetadataIR, - RuntimeApiMethodMetadataIR, RuntimeApiMethodParamMetadataIR, SignedExtensionMetadataIR, + RuntimeApiMethodMetadataIR, RuntimeApiMethodParamMetadataIR, TransactionExtensionMetadataIR, }; use frame_metadata::v15::{ @@ -87,8 +87,8 @@ impl From for PalletMetadata { } } -impl From for SignedExtensionMetadata { - fn from(ir: SignedExtensionMetadataIR) -> Self { +impl From for SignedExtensionMetadata { + fn from(ir: TransactionExtensionMetadataIR) -> Self { SignedExtensionMetadata { identifier: ir.identifier, ty: ir.ty, @@ -105,7 +105,7 @@ impl From for ExtrinsicMetadata { call_ty: ir.call_ty, signature_ty: ir.signature_ty, extra_ty: ir.extra_ty, - signed_extensions: ir.signed_extensions.into_iter().map(Into::into).collect(), + signed_extensions: ir.extensions.into_iter().map(Into::into).collect(), } } } diff --git a/substrate/primitives/runtime/Cargo.toml b/substrate/primitives/runtime/Cargo.toml index cacfc059722942f2fe70d951f8016449914c2e5b..758cd8944fdc1603f569e966f0dea2347c830635 100644 --- a/substrate/primitives/runtime/Cargo.toml +++ b/substrate/primitives/runtime/Cargo.toml @@ -17,6 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +tuplex = { version = "0.1.2", default-features = false } 
codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } either = { version = "1.5", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } @@ -67,6 +68,7 @@ std = [ "sp-std/std", "sp-tracing/std", "sp-weights/std", + "tuplex/std", ] # Serde support without relying on std features. diff --git a/substrate/primitives/runtime/src/generic/checked_extrinsic.rs b/substrate/primitives/runtime/src/generic/checked_extrinsic.rs index 44325920beee0c1026c4ec7dc68b6943652dfc92..e28c8fe424abc7a0d27d41a6751e1118f50d945d 100644 --- a/substrate/primitives/runtime/src/generic/checked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/checked_extrinsic.rs @@ -18,81 +18,115 @@ //! Generic implementation of an extrinsic that has passed the verification //! stage. +use codec::Encode; + use crate::{ traits::{ - self, DispatchInfoOf, Dispatchable, MaybeDisplay, Member, PostDispatchInfoOf, - SignedExtension, ValidateUnsigned, + self, transaction_extension::TransactionExtension, DispatchInfoOf, DispatchTransaction, + Dispatchable, MaybeDisplay, Member, PostDispatchInfoOf, ValidateUnsigned, }, transaction_validity::{TransactionSource, TransactionValidity}, }; +/// The kind of extrinsic this is, including any fields required of that kind. This is basically +/// the full extrinsic except the `Call`. +#[derive(PartialEq, Eq, Clone, sp_core::RuntimeDebug)] +pub enum ExtrinsicFormat { + /// Extrinsic is bare; it must pass either the bare forms of `TransactionExtension` or + /// `ValidateUnsigned`, both deprecated, or alternatively a `ProvideInherent`. + Bare, + /// Extrinsic has a default `Origin` of `Signed(AccountId)` and must pass all + /// `TransactionExtension`s regular checks and includes all extension data. + Signed(AccountId, Extension), + /// Extrinsic has a default `Origin` of `None` and must pass all `TransactionExtension`s. + /// regular checks and includes all extension data. + General(Extension), +} + +// TODO: Rename ValidateUnsigned to ValidateInherent +// TODO: Consider changing ValidateInherent API to avoid need for duplicating validate +// code into pre_dispatch (rename that to `prepare`). +// TODO: New extrinsic type corresponding to `ExtrinsicFormat::General`, which is +// unsigned but includes extension data. +// TODO: Move usage of `signed` to `format`: +// - Inherent instead of None. +// - Signed(id, extension) instead of Some((id, extra)). +// - Introduce General(extension) for one without a signature. + /// Definition of something that the external world might want to say; its existence implies that it /// has been checked and is good, particularly with regards to the signature. /// /// This is typically passed into [`traits::Applyable::apply`], which should execute /// [`CheckedExtrinsic::function`], alongside all other bits and bobs. #[derive(PartialEq, Eq, Clone, sp_core::RuntimeDebug)] -pub struct CheckedExtrinsic { +pub struct CheckedExtrinsic { /// Who this purports to be from and the number of extrinsics have come before /// from the same signer, if anyone (note this is not a signature). - pub signed: Option<(AccountId, Extra)>, + pub format: ExtrinsicFormat, /// The function that should be called. 
pub function: Call, } -impl traits::Applyable - for CheckedExtrinsic +impl traits::Applyable + for CheckedExtrinsic where AccountId: Member + MaybeDisplay, - Call: Member + Dispatchable, - Extra: SignedExtension, + Call: Member + Dispatchable + Encode, + Extension: TransactionExtension, RuntimeOrigin: From>, { type Call = Call; - fn validate>( + fn validate>( &self, - // TODO [#5006;ToDr] should source be passed to `SignedExtension`s? - // Perhaps a change for 2.0 to avoid breaking too much APIs? source: TransactionSource, info: &DispatchInfoOf, len: usize, ) -> TransactionValidity { - if let Some((ref id, ref extra)) = self.signed { - Extra::validate(extra, id, &self.function, info, len) - } else { - let valid = Extra::validate_unsigned(&self.function, info, len)?; - let unsigned_validation = U::validate_unsigned(source, &self.function)?; - Ok(valid.combine_with(unsigned_validation)) + match self.format { + ExtrinsicFormat::Bare => { + let inherent_validation = I::validate_unsigned(source, &self.function)?; + #[allow(deprecated)] + let legacy_validation = Extension::validate_bare_compat(&self.function, info, len)?; + Ok(legacy_validation.combine_with(inherent_validation)) + }, + ExtrinsicFormat::Signed(ref signer, ref extension) => { + let origin = Some(signer.clone()).into(); + extension.validate_only(origin, &self.function, info, len).map(|x| x.0) + }, + ExtrinsicFormat::General(ref extension) => + extension.validate_only(None.into(), &self.function, info, len).map(|x| x.0), } } - fn apply>( + fn apply>( self, info: &DispatchInfoOf, len: usize, ) -> crate::ApplyExtrinsicResultWithInfo> { - let (maybe_who, maybe_pre) = if let Some((id, extra)) = self.signed { - let pre = Extra::pre_dispatch(extra, &id, &self.function, info, len)?; - (Some(id), Some(pre)) - } else { - Extra::pre_dispatch_unsigned(&self.function, info, len)?; - U::pre_dispatch(&self.function)?; - (None, None) - }; - let res = self.function.dispatch(RuntimeOrigin::from(maybe_who)); - let post_info = match res { - Ok(info) => info, - Err(err) => err.post_info, - }; - Extra::post_dispatch( - maybe_pre, - info, - &post_info, - len, - &res.map(|_| ()).map_err(|e| e.error), - )?; - Ok(res) + match self.format { + ExtrinsicFormat::Bare => { + I::pre_dispatch(&self.function)?; + // TODO: Remove below once `pre_dispatch_unsigned` is removed from `LegacyExtension` + // or `LegacyExtension` is removed. + #[allow(deprecated)] + Extension::validate_bare_compat(&self.function, info, len)?; + #[allow(deprecated)] + Extension::pre_dispatch_bare_compat(&self.function, info, len)?; + let res = self.function.dispatch(None.into()); + let post_info = res.unwrap_or_else(|err| err.post_info); + let pd_res = res.map(|_| ()).map_err(|e| e.error); + // TODO: Remove below once `pre_dispatch_unsigned` is removed from `LegacyExtension` + // or `LegacyExtension` is removed. 
+ #[allow(deprecated)] + Extension::post_dispatch_bare_compat(info, &post_info, len, &pd_res)?; + Ok(res) + }, + ExtrinsicFormat::Signed(signer, extension) => + extension.dispatch_transaction(Some(signer).into(), self.function, info, len), + ExtrinsicFormat::General(extension) => + extension.dispatch_transaction(None.into(), self.function, info, len), + } } } diff --git a/substrate/primitives/runtime/src/generic/mod.rs b/substrate/primitives/runtime/src/generic/mod.rs index 3687f7cdb3b2b24c087ac47c470b4d39721e2df5..5713c166b04c81045ccf458a538d4ca083178840 100644 --- a/substrate/primitives/runtime/src/generic/mod.rs +++ b/substrate/primitives/runtime/src/generic/mod.rs @@ -29,9 +29,9 @@ mod unchecked_extrinsic; pub use self::{ block::{Block, BlockId, SignedBlock}, - checked_extrinsic::CheckedExtrinsic, + checked_extrinsic::{CheckedExtrinsic, ExtrinsicFormat}, digest::{Digest, DigestItem, DigestItemRef, OpaqueDigestItemId}, era::{Era, Phase}, header::Header, - unchecked_extrinsic::{SignedPayload, UncheckedExtrinsic}, + unchecked_extrinsic::{Preamble, SignedPayload, UncheckedExtrinsic}, }; diff --git a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs index 5b54caf597b7396dd5f32b0f695db2e011a9fca1..44dfda13d9441c2a2408628d58e57f673719ffcf 100644 --- a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -18,10 +18,11 @@ //! Generic implementation of an unchecked (pre-verification) extrinsic. use crate::{ - generic::CheckedExtrinsic, + generic::{CheckedExtrinsic, ExtrinsicFormat}, traits::{ - self, Checkable, Extrinsic, ExtrinsicMetadata, IdentifyAccount, MaybeDisplay, Member, - SignaturePayload, SignedExtension, + self, transaction_extension::TransactionExtensionBase, Checkable, Dispatchable, Extrinsic, + ExtrinsicMetadata, IdentifyAccount, MaybeDisplay, Member, SignaturePayload, + TransactionExtension, }, transaction_validity::{InvalidTransaction, TransactionValidityError}, OpaqueExtrinsic, @@ -40,8 +41,60 @@ use sp_std::{fmt, prelude::*}; /// the decoding fails. const EXTRINSIC_FORMAT_VERSION: u8 = 4; -/// The `SingaturePayload` of `UncheckedExtrinsic`. -type UncheckedSignaturePayload = (Address, Signature, Extra); +/// The `SignaturePayload` of `UncheckedExtrinsic`. +type UncheckedSignaturePayload = (Address, Signature, Extension); + +impl SignaturePayload + for UncheckedSignaturePayload +{ + type SignatureAddress = Address; + type Signature = Signature; + type SignatureExtra = Extension; +} + +/// A "header" for extrinsics leading up to the call itself. Determines the type of extrinsic and +/// holds any necessary specialized data. +#[derive(Eq, PartialEq, Clone, Encode, Decode)] +pub enum Preamble { + /// An extrinsic without a signature or any extension. This means it's either an inherent or + /// an old-school "Unsigned" (we don't use that terminology any more since it's confusable with + /// the general transaction which is without a signature but does have an extension). + /// + /// NOTE: In the future, once we remove `ValidateUnsigned`, this will only serve Inherent + /// extrinsics and thus can be renamed to `Inherent`. + #[codec(index = 0b00000100)] + Bare, + /// An old-school transaction extrinsic which includes a signature of some hard-coded crypto. + #[codec(index = 0b10000100)] + Signed(Address, Signature, Extension), + /// A new-school transaction extrinsic which does not include a signature. 
+ #[codec(index = 0b01000100)] + General(Extension), +} + +impl Preamble { + /// Returns `Some` if this is a signed extrinsic, together with the relevant inner fields. + pub fn to_signed(self) -> Option<(Address, Signature, Extension)> { + match self { + Self::Signed(a, s, e) => Some((a, s, e)), + _ => None, + } + } +} + +impl fmt::Debug for Preamble +where + Address: fmt::Debug, + Extension: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::Bare => write!(f, "Bare"), + Self::Signed(address, _, tx_ext) => write!(f, "Signed({:?}, {:?})", address, tx_ext), + Self::General(tx_ext) => write!(f, "General({:?})", tx_ext), + } + } +} /// An extrinsic right from the external world. This is unchecked and so can contain a signature. /// @@ -65,41 +118,28 @@ type UncheckedSignaturePayload = (Address, Signature, /// This can be checked using [`Checkable`], yielding a [`CheckedExtrinsic`], which is the /// counterpart of this type after its signature (and other non-negotiable validity checks) have /// passed. -#[derive(PartialEq, Eq, Clone)] -pub struct UncheckedExtrinsic -where - Extra: SignedExtension, -{ - /// The signature, address, number of extrinsics have come before from the same signer and an - /// era describing the longevity of this transaction, if this is a signed extrinsic. - /// - /// `None` if it is unsigned or an inherent. - pub signature: Option>, +#[derive(PartialEq, Eq, Clone, Debug)] +pub struct UncheckedExtrinsic { + /// Information regarding the type of extrinsic this is (inherent or transaction) as well as + /// associated extension (`Extension`) data if it's a transaction and a possible signature. + pub preamble: Preamble, /// The function that should be called. pub function: Call, } -impl SignaturePayload - for UncheckedSignaturePayload -{ - type SignatureAddress = Address; - type Signature = Signature; - type SignatureExtra = Extra; -} - /// Manual [`TypeInfo`] implementation because of custom encoding. The data is a valid encoded /// `Vec`, but requires some logic to extract the signature and payload. /// /// See [`UncheckedExtrinsic::encode`] and [`UncheckedExtrinsic::decode`]. -impl TypeInfo - for UncheckedExtrinsic +impl TypeInfo + for UncheckedExtrinsic where Address: StaticTypeInfo, Call: StaticTypeInfo, Signature: StaticTypeInfo, - Extra: SignedExtension + StaticTypeInfo, + Extension: StaticTypeInfo, { - type Identity = UncheckedExtrinsic; + type Identity = UncheckedExtrinsic; fn type_info() -> Type { Type::builder() @@ -111,7 +151,7 @@ where TypeParameter::new("Address", Some(meta_type::
())), TypeParameter::new("Call", Some(meta_type::())), TypeParameter::new("Signature", Some(meta_type::())), - TypeParameter::new("Extra", Some(meta_type::())), + TypeParameter::new("Extra", Some(meta_type::())), ]) .docs(&["UncheckedExtrinsic raw bytes, requires custom decoding routine"]) // Because of the custom encoding, we can only accurately describe the encoding as an @@ -121,66 +161,113 @@ where } } -impl - UncheckedExtrinsic -{ - /// New instance of a signed extrinsic aka "transaction". - pub fn new_signed(function: Call, signed: Address, signature: Signature, extra: Extra) -> Self { - Self { signature: Some((signed, signature, extra)), function } +impl UncheckedExtrinsic { + /// New instance of a bare (ne unsigned) extrinsic. This could be used for an inherent or an + /// old-school "unsigned transaction" (which are new being deprecated in favour of general + /// transactions). + #[deprecated = "Use new_bare instead"] + pub fn new_unsigned(function: Call) -> Self { + Self::new_bare(function) } - /// New instance of an unsigned extrinsic aka "inherent". - pub fn new_unsigned(function: Call) -> Self { - Self { signature: None, function } + /// Returns `true` if this extrinsic instance is an inherent, `false`` otherwise. + pub fn is_inherent(&self) -> bool { + matches!(self.preamble, Preamble::Bare) + } + + /// Returns `true` if this extrinsic instance is an old-school signed transaction, `false` + /// otherwise. + pub fn is_signed(&self) -> bool { + matches!(self.preamble, Preamble::Signed(..)) + } + + /// Create an `UncheckedExtrinsic` from a `Preamble` and the actual `Call`. + pub fn from_parts(function: Call, preamble: Preamble) -> Self { + Self { preamble, function } + } + + /// New instance of a bare (ne unsigned) extrinsic. + pub fn new_bare(function: Call) -> Self { + Self { preamble: Preamble::Bare, function } + } + + /// New instance of an old-school signed transaction. + pub fn new_signed( + function: Call, + signed: Address, + signature: Signature, + tx_ext: Extension, + ) -> Self { + Self { preamble: Preamble::Signed(signed, signature, tx_ext), function } + } + + /// New instance of an new-school unsigned transaction. + pub fn new_transaction(function: Call, tx_ext: Extension) -> Self { + Self { preamble: Preamble::General(tx_ext), function } } } -impl - Extrinsic for UncheckedExtrinsic +// TODO: We can get rid of this trait and just use UncheckedExtrinsic directly. 
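// A minimal, self-contained sketch (not from this patch) of the three
// construction paths introduced above. `Preamble` and `UncheckedExtrinsic`
// here are simplified stand-ins without the SCALE codec, signature or
// extension logic; the point is that the preamble alone now distinguishes a
// bare (inherent) extrinsic, an old-school signed transaction and a
// new-school general transaction, which `is_inherent`/`is_signed` simply
// pattern-match on.
#[derive(Debug)]
enum Preamble<Address, Signature, Extension> {
    // On the wire the first byte carries the format version (4) in its low
    // bits and the kind in its high bits, mirroring the `#[codec(index = ..)]`
    // values above: 0b0000_0100 bare, 0b1000_0100 signed, 0b0100_0100 general.
    Bare,
    Signed(Address, Signature, Extension),
    General(Extension),
}

#[derive(Debug)]
struct UncheckedExtrinsic<Address, Call, Signature, Extension> {
    preamble: Preamble<Address, Signature, Extension>,
    function: Call,
}

impl<Address, Call, Signature, Extension> UncheckedExtrinsic<Address, Call, Signature, Extension> {
    fn new_bare(function: Call) -> Self {
        Self { preamble: Preamble::Bare, function }
    }
    fn new_signed(function: Call, who: Address, sig: Signature, ext: Extension) -> Self {
        Self { preamble: Preamble::Signed(who, sig, ext), function }
    }
    fn new_transaction(function: Call, ext: Extension) -> Self {
        Self { preamble: Preamble::General(ext), function }
    }
    fn is_inherent(&self) -> bool {
        matches!(self.preamble, Preamble::Bare)
    }
    fn is_signed(&self) -> bool {
        matches!(self.preamble, Preamble::Signed(..))
    }
}

// Usage sketch with toy types: a `u64` account id, unit signature/extension
// and a `Vec<u8>` call.
fn _preamble_demo() {
    let inherent = UncheckedExtrinsic::<u64, Vec<u8>, (), ()>::new_bare(vec![0]);
    let signed = UncheckedExtrinsic::new_signed(vec![1u8], 42u64, (), ());
    let general = UncheckedExtrinsic::<u64, Vec<u8>, (), ()>::new_transaction(vec![2], ());
    assert!(inherent.is_inherent() && !inherent.is_signed());
    assert!(signed.is_signed());
    assert!(!general.is_signed() && !general.is_inherent());
}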
+ +impl Extrinsic + for UncheckedExtrinsic { type Call = Call; - type SignaturePayload = UncheckedSignaturePayload; + type SignaturePayload = UncheckedSignaturePayload; + + fn is_bare(&self) -> bool { + matches!(self.preamble, Preamble::Bare) + } fn is_signed(&self) -> Option { - Some(self.signature.is_some()) + Some(matches!(self.preamble, Preamble::Signed(..))) } fn new(function: Call, signed_data: Option) -> Option { Some(if let Some((address, signature, extra)) = signed_data { Self::new_signed(function, address, signature, extra) } else { - Self::new_unsigned(function) + Self::new_bare(function) }) } + + fn new_inherent(function: Call) -> Self { + Self::new_bare(function) + } } -impl Checkable - for UncheckedExtrinsic +impl Checkable + for UncheckedExtrinsic where LookupSource: Member + MaybeDisplay, - Call: Encode + Member, + Call: Encode + Member + Dispatchable, Signature: Member + traits::Verify, ::Signer: IdentifyAccount, - Extra: SignedExtension, + Extension: Encode + TransactionExtension, AccountId: Member + MaybeDisplay, Lookup: traits::Lookup, { - type Checked = CheckedExtrinsic; + type Checked = CheckedExtrinsic; fn check(self, lookup: &Lookup) -> Result { - Ok(match self.signature { - Some((signed, signature, extra)) => { + Ok(match self.preamble { + Preamble::Signed(signed, signature, tx_ext) => { let signed = lookup.lookup(signed)?; - let raw_payload = SignedPayload::new(self.function, extra)?; + // CHECK! Should this not contain implicit? + let raw_payload = SignedPayload::new(self.function, tx_ext)?; if !raw_payload.using_encoded(|payload| signature.verify(payload, &signed)) { return Err(InvalidTransaction::BadProof.into()) } - - let (function, extra, _) = raw_payload.deconstruct(); - CheckedExtrinsic { signed: Some((signed, extra)), function } + let (function, tx_ext, _) = raw_payload.deconstruct(); + CheckedExtrinsic { format: ExtrinsicFormat::Signed(signed, tx_ext), function } + }, + Preamble::General(tx_ext) => CheckedExtrinsic { + format: ExtrinsicFormat::General(tx_ext), + function: self.function, }, - None => CheckedExtrinsic { signed: None, function: self.function }, + Preamble::Bare => + CheckedExtrinsic { format: ExtrinsicFormat::Bare, function: self.function }, }) } @@ -189,91 +276,38 @@ where self, lookup: &Lookup, ) -> Result { - Ok(match self.signature { - Some((signed, _, extra)) => { + Ok(match self.preamble { + Preamble::Signed(signed, _, extra) => { let signed = lookup.lookup(signed)?; - let raw_payload = SignedPayload::new(self.function, extra)?; - let (function, extra, _) = raw_payload.deconstruct(); - CheckedExtrinsic { signed: Some((signed, extra)), function } + CheckedExtrinsic { + format: ExtrinsicFormat::Signed(signed, extra), + function: self.function, + } }, - None => CheckedExtrinsic { signed: None, function: self.function }, + Preamble::General(extra) => CheckedExtrinsic { + format: ExtrinsicFormat::General(extra), + function: self.function, + }, + Preamble::Bare => + CheckedExtrinsic { format: ExtrinsicFormat::Bare, function: self.function }, }) } } -impl ExtrinsicMetadata - for UncheckedExtrinsic -where - Extra: SignedExtension, +impl> + ExtrinsicMetadata for UncheckedExtrinsic { const VERSION: u8 = EXTRINSIC_FORMAT_VERSION; - type SignedExtensions = Extra; -} - -/// A payload that has been signed for an unchecked extrinsics. -/// -/// Note that the payload that we sign to produce unchecked extrinsic signature -/// is going to be different than the `SignaturePayload` - so the thing the extrinsic -/// actually contains. 
-pub struct SignedPayload((Call, Extra, Extra::AdditionalSigned)); - -impl SignedPayload -where - Call: Encode, - Extra: SignedExtension, -{ - /// Create new `SignedPayload`. - /// - /// This function may fail if `additional_signed` of `Extra` is not available. - pub fn new(call: Call, extra: Extra) -> Result { - let additional_signed = extra.additional_signed()?; - let raw_payload = (call, extra, additional_signed); - Ok(Self(raw_payload)) - } - - /// Create new `SignedPayload` from raw components. - pub fn from_raw(call: Call, extra: Extra, additional_signed: Extra::AdditionalSigned) -> Self { - Self((call, extra, additional_signed)) - } - - /// Deconstruct the payload into it's components. - pub fn deconstruct(self) -> (Call, Extra, Extra::AdditionalSigned) { - self.0 - } + type Extra = Extension; } -impl Encode for SignedPayload -where - Call: Encode, - Extra: SignedExtension, -{ - /// Get an encoded version of this payload. - /// - /// Payloads longer than 256 bytes are going to be `blake2_256`-hashed. - fn using_encoded R>(&self, f: F) -> R { - self.0.using_encoded(|payload| { - if payload.len() > 256 { - f(&blake2_256(payload)[..]) - } else { - f(payload) - } - }) - } -} - -impl EncodeLike for SignedPayload -where - Call: Encode, - Extra: SignedExtension, -{ -} - -impl Decode for UncheckedExtrinsic +impl Decode + for UncheckedExtrinsic where Address: Decode, Signature: Decode, Call: Decode, - Extra: SignedExtension, + Extension: Decode, { fn decode(input: &mut I) -> Result { // This is a little more complicated than usual since the binary format must be compatible @@ -282,15 +316,7 @@ where let expected_length: Compact = Decode::decode(input)?; let before_length = input.remaining_len()?; - let version = input.read_byte()?; - - let is_signed = version & 0b1000_0000 != 0; - let version = version & 0b0111_1111; - if version != EXTRINSIC_FORMAT_VERSION { - return Err("Invalid transaction version".into()) - } - - let signature = is_signed.then(|| Decode::decode(input)).transpose()?; + let preamble = Decode::decode(input)?; let function = Decode::decode(input)?; if let Some((before_length, after_length)) = @@ -303,31 +329,20 @@ where } } - Ok(Self { signature, function }) + Ok(Self { preamble, function }) } } #[docify::export(unchecked_extrinsic_encode_impl)] -impl Encode for UncheckedExtrinsic +impl Encode + for UncheckedExtrinsic where - Address: Encode, - Signature: Encode, + Preamble: Encode, Call: Encode, - Extra: SignedExtension, + Extension: Encode, { fn encode(&self) -> Vec { - let mut tmp = Vec::with_capacity(sp_std::mem::size_of::()); - - // 1 byte version id. 
- match self.signature.as_ref() { - Some(s) => { - tmp.push(EXTRINSIC_FORMAT_VERSION | 0b1000_0000); - s.encode_to(&mut tmp); - }, - None => { - tmp.push(EXTRINSIC_FORMAT_VERSION & 0b0111_1111); - }, - } + let mut tmp = self.preamble.encode(); self.function.encode_to(&mut tmp); let compact_len = codec::Compact::(tmp.len() as u32); @@ -342,19 +357,19 @@ where } } -impl EncodeLike - for UncheckedExtrinsic +impl EncodeLike + for UncheckedExtrinsic where Address: Encode, Signature: Encode, - Call: Encode, - Extra: SignedExtension, + Call: Encode + Dispatchable, + Extension: TransactionExtension, { } #[cfg(feature = "serde")] -impl serde::Serialize - for UncheckedExtrinsic +impl serde::Serialize + for UncheckedExtrinsic { fn serialize(&self, seq: S) -> Result where @@ -365,45 +380,88 @@ impl s } #[cfg(feature = "serde")] -impl<'a, Address: Decode, Signature: Decode, Call: Decode, Extra: SignedExtension> - serde::Deserialize<'a> for UncheckedExtrinsic +impl<'a, Address: Decode, Signature: Decode, Call: Decode, Extension: Decode> serde::Deserialize<'a> + for UncheckedExtrinsic { fn deserialize(de: D) -> Result where D: serde::Deserializer<'a>, { let r = sp_core::bytes::deserialize(de)?; - Decode::decode(&mut &r[..]) + Self::decode(&mut &r[..]) .map_err(|e| serde::de::Error::custom(format!("Decode error: {}", e))) } } -impl fmt::Debug - for UncheckedExtrinsic +/// A payload that has been signed for an unchecked extrinsics. +/// +/// Note that the payload that we sign to produce unchecked extrinsic signature +/// is going to be different than the `SignaturePayload` - so the thing the extrinsic +/// actually contains. +pub struct SignedPayload( + (Call, Extension, Extension::Implicit), +); + +impl SignedPayload where - Address: fmt::Debug, - Call: fmt::Debug, - Extra: SignedExtension, + Call: Encode + Dispatchable, + Extension: TransactionExtensionBase, { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "UncheckedExtrinsic({:?}, {:?})", - self.signature.as_ref().map(|x| (&x.0, &x.2)), - self.function, - ) + /// Create new `SignedPayload`. + /// + /// This function may fail if `implicit` of `Extension` is not available. + pub fn new(call: Call, tx_ext: Extension) -> Result { + let implicit = Extension::implicit(&tx_ext)?; + let raw_payload = (call, tx_ext, implicit); + Ok(Self(raw_payload)) + } + + /// Create new `SignedPayload` from raw components. + pub fn from_raw(call: Call, tx_ext: Extension, implicit: Extension::Implicit) -> Self { + Self((call, tx_ext, implicit)) + } + + /// Deconstruct the payload into it's components. + pub fn deconstruct(self) -> (Call, Extension, Extension::Implicit) { + self.0 } } -impl From> - for OpaqueExtrinsic +impl Encode for SignedPayload +where + Call: Encode + Dispatchable, + Extension: TransactionExtensionBase, +{ + /// Get an encoded version of this payload. + /// + /// Payloads longer than 256 bytes are going to be `blake2_256`-hashed. 
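// A minimal sketch (not from this patch) of the signing-payload rule stated in
// the doc comment above and implemented by `using_encoded` below: the
// signature is made over the SCALE-encoded `(call, extension, implicit)`
// tuple directly while it is short, and over its 32-byte blake2_256 hash once
// the encoding exceeds 256 bytes. `blake2_256` is passed in here as a
// stand-in for `sp_io::hashing::blake2_256`. Verification in `check` applies
// the same rule before calling `Signature::verify`, so both sides agree on
// what was signed.
fn signing_message(encoded_payload: &[u8], blake2_256: impl Fn(&[u8]) -> [u8; 32]) -> Vec<u8> {
    if encoded_payload.len() > 256 {
        blake2_256(encoded_payload).to_vec()
    } else {
        encoded_payload.to_vec()
    }
}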
+ fn using_encoded R>(&self, f: F) -> R { + self.0.using_encoded(|payload| { + if payload.len() > 256 { + f(&blake2_256(payload)[..]) + } else { + f(payload) + } + }) + } +} + +impl EncodeLike for SignedPayload +where + Call: Encode + Dispatchable, + Extension: TransactionExtensionBase, +{ +} + +impl + From> for OpaqueExtrinsic where Address: Encode, Signature: Encode, Call: Encode, - Extra: SignedExtension, + Extension: Encode, { - fn from(extrinsic: UncheckedExtrinsic) -> Self { + fn from(extrinsic: UncheckedExtrinsic) -> Self { Self::from_bytes(extrinsic.encode().as_slice()).expect( "both OpaqueExtrinsic and UncheckedExtrinsic have encoding that is compatible with \ raw Vec encoding; qed", @@ -411,60 +469,198 @@ where } } +#[cfg(test)] +mod legacy { + use codec::{Compact, Decode, Encode, EncodeLike, Error, Input}; + use scale_info::{ + build::Fields, meta_type, Path, StaticTypeInfo, Type, TypeInfo, TypeParameter, + }; + + pub type OldUncheckedSignaturePayload = (Address, Signature, Extra); + + #[derive(PartialEq, Eq, Clone, Debug)] + pub struct OldUncheckedExtrinsic { + pub signature: Option>, + pub function: Call, + } + + impl TypeInfo + for OldUncheckedExtrinsic + where + Address: StaticTypeInfo, + Call: StaticTypeInfo, + Signature: StaticTypeInfo, + Extra: StaticTypeInfo, + { + type Identity = OldUncheckedExtrinsic; + + fn type_info() -> Type { + Type::builder() + .path(Path::new("UncheckedExtrinsic", module_path!())) + // Include the type parameter types, even though they are not used directly in any + // of the described fields. These type definitions can be used by downstream + // consumers to help construct the custom decoding from the opaque bytes (see + // below). + .type_params(vec![ + TypeParameter::new("Address", Some(meta_type::
())), + TypeParameter::new("Call", Some(meta_type::())), + TypeParameter::new("Signature", Some(meta_type::())), + TypeParameter::new("Extra", Some(meta_type::())), + ]) + .docs(&["OldUncheckedExtrinsic raw bytes, requires custom decoding routine"]) + // Because of the custom encoding, we can only accurately describe the encoding as + // an opaque `Vec`. Downstream consumers will need to manually implement the + // codec to encode/decode the `signature` and `function` fields. + .composite(Fields::unnamed().field(|f| f.ty::>())) + } + } + + impl OldUncheckedExtrinsic { + pub fn new_signed( + function: Call, + signed: Address, + signature: Signature, + extra: Extra, + ) -> Self { + Self { signature: Some((signed, signature, extra)), function } + } + + pub fn new_unsigned(function: Call) -> Self { + Self { signature: None, function } + } + } + + impl Decode + for OldUncheckedExtrinsic + where + Address: Decode, + Signature: Decode, + Call: Decode, + Extra: Decode, + { + fn decode(input: &mut I) -> Result { + // This is a little more complicated than usual since the binary format must be + // compatible with SCALE's generic `Vec` type. Basically this just means accepting + // that there will be a prefix of vector length. + let expected_length: Compact = Decode::decode(input)?; + let before_length = input.remaining_len()?; + + let version = input.read_byte()?; + + let is_signed = version & 0b1000_0000 != 0; + let version = version & 0b0111_1111; + if version != 4u8 { + return Err("Invalid transaction version".into()) + } + + let signature = is_signed.then(|| Decode::decode(input)).transpose()?; + let function = Decode::decode(input)?; + + if let Some((before_length, after_length)) = + input.remaining_len()?.and_then(|a| before_length.map(|b| (b, a))) + { + let length = before_length.saturating_sub(after_length); + + if length != expected_length.0 as usize { + return Err("Invalid length prefix".into()) + } + } + + Ok(Self { signature, function }) + } + } + + #[docify::export(unchecked_extrinsic_encode_impl)] + impl Encode + for OldUncheckedExtrinsic + where + Address: Encode, + Signature: Encode, + Call: Encode, + Extra: Encode, + { + fn encode(&self) -> Vec { + let mut tmp = Vec::with_capacity(sp_std::mem::size_of::()); + + // 1 byte version id. + match self.signature.as_ref() { + Some(s) => { + tmp.push(4u8 | 0b1000_0000); + s.encode_to(&mut tmp); + }, + None => { + tmp.push(4u8 & 0b0111_1111); + }, + } + self.function.encode_to(&mut tmp); + + let compact_len = codec::Compact::(tmp.len() as u32); + + // Allocate the output buffer with the correct length + let mut output = Vec::with_capacity(compact_len.size_hint() + tmp.len()); + + compact_len.encode_to(&mut output); + output.extend(tmp); + + output + } + } + + impl EncodeLike + for OldUncheckedExtrinsic + where + Address: Encode, + Signature: Encode, + Call: Encode, + Extra: Encode, + { + } +} + #[cfg(test)] mod tests { - use super::*; + use super::{legacy::OldUncheckedExtrinsic, *}; use crate::{ codec::{Decode, Encode}, + impl_tx_ext_default, testing::TestSignature as TestSig, - traits::{DispatchInfoOf, IdentityLookup, SignedExtension}, + traits::{FakeDispatchable, IdentityLookup, TransactionExtension}, }; use sp_io::hashing::blake2_256; type TestContext = IdentityLookup; type TestAccountId = u64; - type TestCall = Vec; + type TestCall = FakeDispatchable>; const TEST_ACCOUNT: TestAccountId = 0; // NOTE: this is demonstration. One can simply use `()` for testing. 
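For orientation on what the `legacy` test module above reproduces: the historical wire format is a compact length prefix, then a single version byte whose high bit flags the presence of a signature and whose low seven bits carry the extrinsic format version (4), followed by the signature payload (if any) and the call. A minimal, standalone sketch of just that version-byte convention (the constant and function names here are illustrative, not part of the crate):

```rust
// Sketch of the legacy version byte used by `OldUncheckedExtrinsic`:
// high bit = "has signature", low 7 bits = extrinsic format version (4).
const FORMAT_VERSION: u8 = 4;
const SIGNED_BIT: u8 = 0b1000_0000;

fn encode_version_byte(is_signed: bool) -> u8 {
    if is_signed { FORMAT_VERSION | SIGNED_BIT } else { FORMAT_VERSION & !SIGNED_BIT }
}

fn decode_version_byte(byte: u8) -> Result<bool, &'static str> {
    if byte & !SIGNED_BIT != FORMAT_VERSION {
        return Err("Invalid transaction version");
    }
    Ok(byte & SIGNED_BIT != 0)
}

fn main() {
    assert_eq!(encode_version_byte(true), 0b1000_0100);
    assert_eq!(decode_version_byte(0b0000_0100), Ok(false));
    assert!(decode_version_byte(0b0000_0101).is_err());
}
```

The `legacy_signed_encode_decode` and `legacy_unsigned_encode_decode` tests further down assert that the new `Preamble`-based codec stays byte-for-byte compatible with this layout.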
#[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, Ord, PartialOrd, TypeInfo)] - struct TestExtra; - impl SignedExtension for TestExtra { - const IDENTIFIER: &'static str = "TestExtra"; - type AccountId = u64; - type Call = (); - type AdditionalSigned = (); + struct DummyExtension; + impl TransactionExtensionBase for DummyExtension { + const IDENTIFIER: &'static str = "DummyExtension"; + type Implicit = (); + } + impl TransactionExtension for DummyExtension { + type Val = (); type Pre = (); - - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { - Ok(()) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) - } + impl_tx_ext_default!(TestCall; Context; validate prepare); } - type Ex = UncheckedExtrinsic; - type CEx = CheckedExtrinsic; + type Ex = UncheckedExtrinsic; + type CEx = CheckedExtrinsic; #[test] fn unsigned_codec_should_work() { - let ux = Ex::new_unsigned(vec![0u8; 0]); + let call: TestCall = vec![0u8; 0].into(); + let ux = Ex::new_inherent(call); let encoded = ux.encode(); assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); } #[test] fn invalid_length_prefix_is_detected() { - let ux = Ex::new_unsigned(vec![0u8; 0]); + let ux = Ex::new_inherent(vec![0u8; 0].into()); let mut encoded = ux.encode(); let length = Compact::::decode(&mut &encoded[..]).unwrap(); @@ -473,13 +669,20 @@ mod tests { assert_eq!(Ex::decode(&mut &encoded[..]), Err("Invalid length prefix".into())); } + #[test] + fn transaction_codec_should_work() { + let ux = Ex::new_transaction(vec![0u8; 0].into(), DummyExtension); + let encoded = ux.encode(); + assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); + } + #[test] fn signed_codec_should_work() { let ux = Ex::new_signed( - vec![0u8; 0], + vec![0u8; 0].into(), TEST_ACCOUNT, - TestSig(TEST_ACCOUNT, (vec![0u8; 0], TestExtra).encode()), - TestExtra, + TestSig(TEST_ACCOUNT, (vec![0u8; 0], DummyExtension).encode()), + DummyExtension, ); let encoded = ux.encode(); assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); @@ -488,13 +691,13 @@ mod tests { #[test] fn large_signed_codec_should_work() { let ux = Ex::new_signed( - vec![0u8; 0], + vec![0u8; 0].into(), TEST_ACCOUNT, TestSig( TEST_ACCOUNT, - (vec![0u8; 257], TestExtra).using_encoded(blake2_256)[..].to_owned(), + (vec![0u8; 257], DummyExtension).using_encoded(blake2_256)[..].to_owned(), ), - TestExtra, + DummyExtension, ); let encoded = ux.encode(); assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); @@ -502,44 +705,63 @@ mod tests { #[test] fn unsigned_check_should_work() { - let ux = Ex::new_unsigned(vec![0u8; 0]); - assert!(!ux.is_signed().unwrap_or(false)); - assert!(>::check(ux, &Default::default()).is_ok()); + let ux = Ex::new_inherent(vec![0u8; 0].into()); + assert!(ux.is_inherent()); + assert_eq!( + >::check(ux, &Default::default()), + Ok(CEx { format: ExtrinsicFormat::Bare, function: vec![0u8; 0].into() }), + ); } #[test] fn badly_signed_check_should_fail() { let ux = Ex::new_signed( - vec![0u8; 0], + vec![0u8; 0].into(), TEST_ACCOUNT, - TestSig(TEST_ACCOUNT, vec![0u8; 0]), - TestExtra, + TestSig(TEST_ACCOUNT, vec![0u8; 0].into()), + DummyExtension, ); - assert!(ux.is_signed().unwrap_or(false)); + assert!(!ux.is_inherent()); assert_eq!( >::check(ux, &Default::default()), Err(InvalidTransaction::BadProof.into()), ); } + #[test] + fn transaction_check_should_work() { + let ux = Ex::new_transaction(vec![0u8; 0].into(), DummyExtension); + 
assert!(!ux.is_inherent()); + assert_eq!( + >::check(ux, &Default::default()), + Ok(CEx { + format: ExtrinsicFormat::General(DummyExtension), + function: vec![0u8; 0].into() + }), + ); + } + #[test] fn signed_check_should_work() { let ux = Ex::new_signed( - vec![0u8; 0], + vec![0u8; 0].into(), TEST_ACCOUNT, - TestSig(TEST_ACCOUNT, (vec![0u8; 0], TestExtra).encode()), - TestExtra, + TestSig(TEST_ACCOUNT, (vec![0u8; 0], DummyExtension).encode()), + DummyExtension, ); - assert!(ux.is_signed().unwrap_or(false)); + assert!(!ux.is_inherent()); assert_eq!( >::check(ux, &Default::default()), - Ok(CEx { signed: Some((TEST_ACCOUNT, TestExtra)), function: vec![0u8; 0] }), + Ok(CEx { + format: ExtrinsicFormat::Signed(TEST_ACCOUNT, DummyExtension), + function: vec![0u8; 0].into() + }), ); } #[test] fn encoding_matches_vec() { - let ex = Ex::new_unsigned(vec![0u8; 0]); + let ex = Ex::new_inherent(vec![0u8; 0].into()); let encoded = ex.encode(); let decoded = Ex::decode(&mut encoded.as_slice()).unwrap(); assert_eq!(decoded, ex); @@ -549,7 +771,7 @@ mod tests { #[test] fn conversion_to_opaque() { - let ux = Ex::new_unsigned(vec![0u8; 0]); + let ux = Ex::new_inherent(vec![0u8; 0].into()); let encoded = ux.encode(); let opaque: OpaqueExtrinsic = ux.into(); let opaque_encoded = opaque.encode(); @@ -558,10 +780,62 @@ mod tests { #[test] fn large_bad_prefix_should_work() { - let encoded = Compact::::from(u32::MAX).encode(); - assert_eq!( - Ex::decode(&mut &encoded[..]), - Err(Error::from("Not enough data to fill buffer")) - ); + let encoded = (Compact::::from(u32::MAX), Preamble::<(), (), ()>::Bare).encode(); + assert!(Ex::decode(&mut &encoded[..]).is_err()); + } + + #[test] + fn legacy_signed_encode_decode() { + let call: TestCall = vec![0u8; 0].into(); + let signed = TEST_ACCOUNT; + let signature = TestSig(TEST_ACCOUNT, (vec![0u8; 0], DummyExtension).encode()); + let extension = DummyExtension; + + let new_ux = Ex::new_signed(call.clone(), signed, signature.clone(), extension.clone()); + let old_ux = + OldUncheckedExtrinsic::::new_signed( + call, signed, signature, extension, + ); + + let encoded_new_ux = new_ux.encode(); + let encoded_old_ux = old_ux.encode(); + + assert_eq!(encoded_new_ux, encoded_old_ux); + + let decoded_new_ux = Ex::decode(&mut &encoded_new_ux[..]).unwrap(); + let decoded_old_ux = + OldUncheckedExtrinsic::::decode( + &mut &encoded_old_ux[..], + ) + .unwrap(); + + assert_eq!(new_ux, decoded_new_ux); + assert_eq!(old_ux, decoded_old_ux); + } + + #[test] + fn legacy_unsigned_encode_decode() { + let call: TestCall = vec![0u8; 0].into(); + + let new_ux = Ex::new_bare(call.clone()); + let old_ux = + OldUncheckedExtrinsic::::new_unsigned( + call, + ); + + let encoded_new_ux = new_ux.encode(); + let encoded_old_ux = old_ux.encode(); + + assert_eq!(encoded_new_ux, encoded_old_ux); + + let decoded_new_ux = Ex::decode(&mut &encoded_new_ux[..]).unwrap(); + let decoded_old_ux = + OldUncheckedExtrinsic::::decode( + &mut &encoded_old_ux[..], + ) + .unwrap(); + + assert_eq!(new_ux, decoded_new_ux); + assert_eq!(old_ux, decoded_old_ux); } } diff --git a/substrate/primitives/runtime/src/lib.rs b/substrate/primitives/runtime/src/lib.rs index ddf92554c83056f192015a26285408b7fdf33695..70605970b4efa683deaef19fed7c2cfa920d3e51 100644 --- a/substrate/primitives/runtime/src/lib.rs +++ b/substrate/primitives/runtime/src/lib.rs @@ -125,6 +125,11 @@ pub use sp_arithmetic::{ Perquintill, Rational128, Rounding, UpperOf, }; +pub use transaction_validity::{ + InvalidTransaction, TransactionSource, 
TransactionValidityError, UnknownTransaction, + ValidTransaction, +}; + pub use either::Either; /// The number of bytes of the module-specific `error` field defined in [`ModuleError`]. @@ -443,21 +448,21 @@ impl std::fmt::Display for MultiSigner { impl Verify for MultiSignature { type Signer = MultiSigner; fn verify>(&self, mut msg: L, signer: &AccountId32) -> bool { - match (self, signer) { - (Self::Ed25519(ref sig), who) => match ed25519::Public::from_slice(who.as_ref()) { + match self { + Self::Ed25519(ref sig) => match ed25519::Public::from_slice(signer.as_ref()) { Ok(signer) => sig.verify(msg, &signer), Err(()) => false, }, - (Self::Sr25519(ref sig), who) => match sr25519::Public::from_slice(who.as_ref()) { + Self::Sr25519(ref sig) => match sr25519::Public::from_slice(signer.as_ref()) { Ok(signer) => sig.verify(msg, &signer), Err(()) => false, }, - (Self::Ecdsa(ref sig), who) => { + Self::Ecdsa(ref sig) => { let m = sp_io::hashing::blake2_256(msg.get()); match sp_io::crypto::secp256k1_ecdsa_recover_compressed(sig.as_ref(), &m) { Ok(pubkey) => &sp_io::hashing::blake2_256(pubkey.as_ref()) == - >::as_ref(who), + >::as_ref(signer), _ => false, } }, @@ -944,9 +949,13 @@ impl<'a> ::serde::Deserialize<'a> for OpaqueExtrinsic { } } +// TODO: OpaqueExtrinsics cannot act like regular extrinsics, right?! impl traits::Extrinsic for OpaqueExtrinsic { type Call = (); type SignaturePayload = (); + fn is_bare(&self) -> bool { + false + } } /// Print something that implements `Printable` from the runtime. @@ -998,6 +1007,16 @@ impl TransactionOutcome { } } +/// Confines the kind of extrinsics that can be included in a block. +#[derive(Debug, Default, PartialEq, Eq, Clone, Copy, Encode, Decode, TypeInfo)] +pub enum ExtrinsicInclusionMode { + /// All extrinsics are allowed to be included in this block. + #[default] + AllExtrinsics, + /// Inherents are allowed to be included. + OnlyInherents, +} + #[cfg(test)] mod tests { use crate::traits::BlakeTwo256; diff --git a/substrate/primitives/runtime/src/testing.rs b/substrate/primitives/runtime/src/testing.rs index 5f94c834a8f295bee945e9d772019e12ed37c828..3d6e0b5ab8bc1c2836755f8622b1f0b7adb8d847 100644 --- a/substrate/primitives/runtime/src/testing.rs +++ b/substrate/primitives/runtime/src/testing.rs @@ -21,24 +21,16 @@ use crate::{ codec::{Codec, Decode, Encode, MaxEncodedLen}, generic, scale_info::TypeInfo, - traits::{ - self, Applyable, BlakeTwo256, Checkable, DispatchInfoOf, Dispatchable, OpaqueKeys, - PostDispatchInfoOf, SignaturePayload, SignedExtension, ValidateUnsigned, - }, - transaction_validity::{TransactionSource, TransactionValidity, TransactionValidityError}, - ApplyExtrinsicResultWithInfo, KeyTypeId, + traits::{self, BlakeTwo256, Dispatchable, OpaqueKeys}, + DispatchResultWithInfo, KeyTypeId, }; -use serde::{de::Error as DeError, Deserialize, Deserializer, Serialize, Serializer}; +use serde::{de::Error as DeError, Deserialize, Deserializer, Serialize}; use sp_core::{ crypto::{key_types, ByteArray, CryptoType, Dummy}, U256, }; pub use sp_core::{sr25519, H256}; -use std::{ - cell::RefCell, - fmt::{self, Debug}, - ops::Deref, -}; +use std::{cell::RefCell, fmt::Debug}; /// A dummy type which can be used instead of regular cryptographic primitives. /// @@ -198,42 +190,6 @@ impl Header { } } -/// An opaque extrinsic wrapper type. 
-#[derive(PartialEq, Eq, Clone, Debug, Encode, Decode)] -pub struct ExtrinsicWrapper(Xt); - -impl traits::Extrinsic for ExtrinsicWrapper { - type Call = (); - type SignaturePayload = (); - - fn is_signed(&self) -> Option { - None - } -} - -impl serde::Serialize for ExtrinsicWrapper { - fn serialize(&self, seq: S) -> Result - where - S: ::serde::Serializer, - { - self.using_encoded(|bytes| seq.serialize_bytes(bytes)) - } -} - -impl From for ExtrinsicWrapper { - fn from(xt: Xt) -> Self { - ExtrinsicWrapper(xt) - } -} - -impl Deref for ExtrinsicWrapper { - type Target = Xt; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - /// Testing block #[derive(PartialEq, Eq, Clone, Serialize, Debug, Encode, Decode, TypeInfo)] pub struct Block { @@ -283,139 +239,22 @@ where } } -/// The signature payload of a `TestXt`. -type TxSingaturePayload = (u64, Extra); - -impl SignaturePayload for TxSingaturePayload { - type SignatureAddress = u64; - type Signature = (); - type SignatureExtra = Extra; -} - -/// Test transaction, tuple of (sender, call, signed_extra) -/// with index only used if sender is some. -/// -/// If sender is some then the transaction is signed otherwise it is unsigned. -#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] -pub struct TestXt { - /// Signature of the extrinsic. - pub signature: Option>, - /// Call of the extrinsic. - pub call: Call, -} - -impl TestXt { - /// Create a new `TextXt`. - pub fn new(call: Call, signature: Option<(u64, Extra)>) -> Self { - Self { call, signature } - } -} - -impl Serialize for TestXt -where - TestXt: Encode, -{ - fn serialize(&self, seq: S) -> Result - where - S: Serializer, - { - self.using_encoded(|bytes| seq.serialize_bytes(bytes)) - } -} +/// Wrapper over a `u64` that can be used as a `RuntimeCall`. +#[derive(PartialEq, Eq, Debug, Clone, Encode, Decode, TypeInfo)] +pub struct MockCallU64(pub u64); -impl Debug for TestXt { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "TestXt({:?}, ...)", self.signature.as_ref().map(|x| &x.0)) +impl Dispatchable for MockCallU64 { + type RuntimeOrigin = u64; + type Config = (); + type Info = (); + type PostInfo = (); + fn dispatch(self, _origin: Self::RuntimeOrigin) -> DispatchResultWithInfo { + Ok(()) } } -impl Checkable for TestXt { - type Checked = Self; - fn check(self, _: &Context) -> Result { - Ok(self) - } - - #[cfg(feature = "try-runtime")] - fn unchecked_into_checked_i_know_what_i_am_doing( - self, - _: &Context, - ) -> Result { - unreachable!() - } -} - -impl traits::Extrinsic - for TestXt -{ - type Call = Call; - type SignaturePayload = TxSingaturePayload; - - fn is_signed(&self) -> Option { - Some(self.signature.is_some()) - } - - fn new(c: Call, sig: Option) -> Option { - Some(TestXt { signature: sig, call: c }) - } -} - -impl traits::ExtrinsicMetadata for TestXt -where - Call: Codec + Sync + Send, - Extra: SignedExtension, -{ - type SignedExtensions = Extra; - const VERSION: u8 = 0u8; -} - -impl Applyable for TestXt -where - Call: 'static - + Sized - + Send - + Sync - + Clone - + Eq - + Codec - + Debug - + Dispatchable, - Extra: SignedExtension, - Origin: From>, -{ - type Call = Call; - - /// Checks to see if this is a valid *transaction*. It returns information on it if so. 
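With `TestXt` and `ExtrinsicWrapper` removed, tests are steered towards `MockCallU64` as the minimal stand-in for a runtime call. A small sketch of how it behaves, assuming the `sp_runtime::testing` helpers introduced in this diff (the `demo` function and values are illustrative):

```rust
use sp_runtime::{testing::MockCallU64, traits::Dispatchable};

// `MockCallU64` wraps a `u64`, uses `u64` as its origin and `()` for its dispatch info,
// and its `dispatch` is a successful no-op.
fn demo() {
    let call: MockCallU64 = 42u64.into();
    assert_eq!(call, MockCallU64(42));

    // Dispatching consumes the call, ignores the origin value and succeeds.
    let origin: u64 = 7;
    assert!(call.dispatch(origin).is_ok());
}
```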
- fn validate>( - &self, - source: TransactionSource, - info: &DispatchInfoOf, - len: usize, - ) -> TransactionValidity { - if let Some((ref id, ref extra)) = self.signature { - Extra::validate(extra, id, &self.call, info, len) - } else { - let valid = Extra::validate_unsigned(&self.call, info, len)?; - let unsigned_validation = U::validate_unsigned(source, &self.call)?; - Ok(valid.combine_with(unsigned_validation)) - } - } - - /// Executes all necessary logic needed prior to dispatch and deconstructs into function call, - /// index and sender. - fn apply>( - self, - info: &DispatchInfoOf, - len: usize, - ) -> ApplyExtrinsicResultWithInfo> { - let maybe_who = if let Some((who, extra)) = self.signature { - Extra::pre_dispatch(extra, &who, &self.call, info, len)?; - Some(who) - } else { - Extra::pre_dispatch_unsigned(&self.call, info, len)?; - U::pre_dispatch(&self.call)?; - None - }; - - Ok(self.call.dispatch(maybe_who.into())) +impl From for MockCallU64 { + fn from(value: u64) -> Self { + Self(value) } } diff --git a/substrate/primitives/runtime/src/traits.rs b/substrate/primitives/runtime/src/traits/mod.rs similarity index 94% rename from substrate/primitives/runtime/src/traits.rs rename to substrate/primitives/runtime/src/traits/mod.rs index caede5e2b59a7e3843c1ec966a02f2bbd86f2a84..b217166d8de34e35b102aefd786e8e25e91b1bb8 100644 --- a/substrate/primitives/runtime/src/traits.rs +++ b/substrate/primitives/runtime/src/traits/mod.rs @@ -19,7 +19,7 @@ use crate::{ generic::Digest, - scale_info::{MetaType, StaticTypeInfo, TypeInfo}, + scale_info::{StaticTypeInfo, TypeInfo}, transaction_validity::{ TransactionSource, TransactionValidity, TransactionValidityError, UnknownTransaction, ValidTransaction, @@ -52,6 +52,12 @@ use std::fmt::Display; #[cfg(feature = "std")] use std::str::FromStr; +pub mod transaction_extension; +pub use transaction_extension::{ + DispatchTransaction, TransactionExtension, TransactionExtensionBase, + TransactionExtensionInterior, TransactionExtensionMetadata, ValidateResult, +}; + /// A lazy value. pub trait Lazy { /// Get a reference to the underlying value. @@ -1253,7 +1259,7 @@ pub trait Header: // that is then used to define `UncheckedExtrinsic`. // ```ignore // pub type UncheckedExtrinsic = -// generic::UncheckedExtrinsic; +// generic::UncheckedExtrinsic; // ``` // This `UncheckedExtrinsic` is supplied to the `Block`. // ```ignore @@ -1323,19 +1329,31 @@ pub trait Extrinsic: Sized { /// Is this `Extrinsic` signed? /// If no information are available about signed/unsigned, `None` should be returned. + #[deprecated = "Use and implement `!is_bare()` instead"] fn is_signed(&self) -> Option { None } - /// Create new instance of the extrinsic. - /// - /// Extrinsics can be split into: - /// 1. Inherents (no signature; created by validators during block production) - /// 2. Unsigned Transactions (no signature; represent "system calls" or other special kinds of - /// calls) 3. Signed Transactions (with signature; a regular transactions with known origin) + /// Returns `true` if this `Extrinsic` is bare. + fn is_bare(&self) -> bool { + #[allow(deprecated)] + !self + .is_signed() + .expect("`is_signed` must return `Some` on production extrinsics; qed") + } + + /// Create a new old-school extrinsic, either a bare extrinsic if `_signed_data` is `None` or + /// a signed transaction is it is `Some`. 
+ #[deprecated = "Use `new_inherent` or the `CreateTransaction` trait instead"] fn new(_call: Self::Call, _signed_data: Option) -> Option { None } + + /// Create a new inherent extrinsic. + fn new_inherent(function: Self::Call) -> Self { + #[allow(deprecated)] + Self::new(function, None).expect("Extrinsic must provide inherents; qed") + } } /// Something that acts like a [`SignaturePayload`](Extrinsic::SignaturePayload) of an @@ -1371,7 +1389,8 @@ pub trait ExtrinsicMetadata { const VERSION: u8; /// Signed extensions attached to this `Extrinsic`. - type SignedExtensions: SignedExtension; + // TODO: metadata-v16: rename to `Extension`. + type Extra; } /// Extract the hashing type for a block. @@ -1456,6 +1475,8 @@ pub trait Dispatchable { -> crate::DispatchResultWithInfo; } +/// Shortcut to reference the `Origin` type of a `Dispatchable`. +pub type OriginOf = ::RuntimeOrigin; /// Shortcut to reference the `Info` type of a `Dispatchable`. pub type DispatchInfoOf = ::Info; /// Shortcut to reference the `PostInfo` type of a `Dispatchable`. @@ -1474,8 +1495,49 @@ impl Dispatchable for () { } } +/// Dispatchable impl containing an arbitrary value which panics if it actually is dispatched. +#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] +pub struct FakeDispatchable(pub Inner); +impl From for FakeDispatchable { + fn from(inner: Inner) -> Self { + Self(inner) + } +} +impl FakeDispatchable { + /// Take `self` and return the underlying inner value. + pub fn deconstruct(self) -> Inner { + self.0 + } +} +impl AsRef for FakeDispatchable { + fn as_ref(&self) -> &Inner { + &self.0 + } +} + +impl Dispatchable for FakeDispatchable { + type RuntimeOrigin = (); + type Config = (); + type Info = (); + type PostInfo = (); + fn dispatch( + self, + _origin: Self::RuntimeOrigin, + ) -> crate::DispatchResultWithInfo { + panic!("This implementation should not be used for actual dispatch."); + } +} + +/// Runtime Origin which includes a System Origin variant whose `AccountId` is the parameter. +pub trait AsSystemOriginSigner { + /// Extract a reference of the inner value of the System `Origin::Signed` variant, if self has + /// that variant. + fn as_system_origin_signer(&self) -> Option<&AccountId>; +} + /// Means by which a transaction may be extended. This type embodies both the data and the logic /// that should be additionally associated with the transaction. It should be plain old data. +#[deprecated = "Use `TransactionExtension` instead."] pub trait SignedExtension: Codec + Debug + Sync + Send + Clone + Eq + PartialEq + StaticTypeInfo { @@ -1493,7 +1555,7 @@ pub trait SignedExtension: /// Any additional data that will go into the signed payload. This may be created dynamically /// from the transaction using the `additional_signed` function. - type AdditionalSigned: Encode + TypeInfo; + type AdditionalSigned: Codec + TypeInfo; /// The type that encodes information that can be passed from pre_dispatch to post-dispatch. type Pre; @@ -1532,38 +1594,6 @@ pub trait SignedExtension: len: usize, ) -> Result; - /// Validate an unsigned transaction for the transaction queue. - /// - /// This function can be called frequently by the transaction queue - /// to obtain transaction validity against current state. - /// It should perform all checks that determine a valid unsigned transaction, - /// and quickly eliminate ones that are stale or incorrect. - /// - /// Make sure to perform the same checks in `pre_dispatch_unsigned` function. 
- fn validate_unsigned( - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> TransactionValidity { - Ok(ValidTransaction::default()) - } - - /// Do any pre-flight stuff for a unsigned transaction. - /// - /// Note this function by default delegates to `validate_unsigned`, so that - /// all checks performed for the transaction queue are also performed during - /// the dispatch phase (applying the extrinsic). - /// - /// If you ever override this function, you need to make sure to always - /// perform the same validation as in `validate_unsigned`. - fn pre_dispatch_unsigned( - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result<(), TransactionValidityError> { - Self::validate_unsigned(call, info, len).map(|_| ()).map_err(Into::into) - } - /// Do any post-flight stuff for an extrinsic. /// /// If the transaction is signed, then `_pre` will contain the output of `pre_dispatch`, @@ -1594,126 +1624,47 @@ pub trait SignedExtension: /// /// As a [`SignedExtension`] can be a tuple of [`SignedExtension`]s we need to return a `Vec` /// that holds the metadata of each one. Each individual `SignedExtension` must return - /// *exactly* one [`SignedExtensionMetadata`]. + /// *exactly* one [`TransactionExtensionMetadata`]. /// /// This method provides a default implementation that returns a vec containing a single - /// [`SignedExtensionMetadata`]. - fn metadata() -> Vec { - sp_std::vec![SignedExtensionMetadata { + /// [`TransactionExtensionMetadata`]. + fn metadata() -> Vec { + sp_std::vec![TransactionExtensionMetadata { identifier: Self::IDENTIFIER, ty: scale_info::meta_type::(), additional_signed: scale_info::meta_type::() }] } -} - -/// Information about a [`SignedExtension`] for the runtime metadata. -pub struct SignedExtensionMetadata { - /// The unique identifier of the [`SignedExtension`]. - pub identifier: &'static str, - /// The type of the [`SignedExtension`]. - pub ty: MetaType, - /// The type of the [`SignedExtension`] additional signed data for the payload. - pub additional_signed: MetaType, -} - -#[impl_for_tuples(1, 12)] -impl SignedExtension for Tuple { - for_tuples!( where #( Tuple: SignedExtension )* ); - type AccountId = AccountId; - type Call = Call; - const IDENTIFIER: &'static str = "You should call `identifier()`!"; - for_tuples!( type AdditionalSigned = ( #( Tuple::AdditionalSigned ),* ); ); - for_tuples!( type Pre = ( #( Tuple::Pre ),* ); ); - - fn additional_signed(&self) -> Result { - Ok(for_tuples!( ( #( Tuple.additional_signed()? ),* ) )) - } - - fn validate( - &self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> TransactionValidity { - let valid = ValidTransaction::default(); - for_tuples!( #( let valid = valid.combine_with(Tuple.validate(who, call, info, len)?); )* ); - Ok(valid) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - Ok(for_tuples!( ( #( Tuple.pre_dispatch(who, call, info, len)? ),* ) )) - } + /// Validate an unsigned transaction for the transaction queue. + /// + /// This function can be called frequently by the transaction queue + /// to obtain transaction validity against current state. + /// It should perform all checks that determine a valid unsigned transaction, + /// and quickly eliminate ones that are stale or incorrect. 
fn validate_unsigned( - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, + _call: &Self::Call, + _info: &DispatchInfoOf, + _len: usize, ) -> TransactionValidity { - let valid = ValidTransaction::default(); - for_tuples!( #( let valid = valid.combine_with(Tuple::validate_unsigned(call, info, len)?); )* ); - Ok(valid) + Ok(ValidTransaction::default()) } + /// Do any pre-flight stuff for an unsigned transaction. + /// + /// Note this function by default delegates to `validate_unsigned`, so that + /// all checks performed for the transaction queue are also performed during + /// the dispatch phase (applying the extrinsic). + /// + /// If you ever override this function, you need not perform the same validation as in + /// `validate_unsigned`. fn pre_dispatch_unsigned( - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, + _call: &Self::Call, + _info: &DispatchInfoOf, + _len: usize, ) -> Result<(), TransactionValidityError> { - for_tuples!( #( Tuple::pre_dispatch_unsigned(call, info, len)?; )* ); Ok(()) } - - fn post_dispatch( - pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, - len: usize, - result: &DispatchResult, - ) -> Result<(), TransactionValidityError> { - match pre { - Some(x) => { - for_tuples!( #( Tuple::post_dispatch(Some(x.Tuple), info, post_info, len, result)?; )* ); - }, - None => { - for_tuples!( #( Tuple::post_dispatch(None, info, post_info, len, result)?; )* ); - }, - } - Ok(()) - } - - fn metadata() -> Vec { - let mut ids = Vec::new(); - for_tuples!( #( ids.extend(Tuple::metadata()); )* ); - ids - } -} - -impl SignedExtension for () { - type AccountId = u64; - type AdditionalSigned = (); - type Call = (); - type Pre = (); - const IDENTIFIER: &'static str = "UnitSignedExtension"; - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { - Ok(()) - } - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) - } } /// An "executable" piece of information, used by the standard Substrate Executive in order to @@ -1762,6 +1713,7 @@ pub trait GetNodeBlockType { /// function is called right before dispatching the call wrapped by an unsigned extrinsic. The /// [`validate_unsigned`](Self::validate_unsigned) function is mainly being used in the context of /// the transaction pool to check the validity of the call wrapped by an unsigned extrinsic. +// TODO: Rename to ValidateBareTransaction (or just remove). pub trait ValidateUnsigned { /// The call to validate type Call; diff --git a/substrate/primitives/runtime/src/traits/transaction_extension/as_transaction_extension.rs b/substrate/primitives/runtime/src/traits/transaction_extension/as_transaction_extension.rs new file mode 100644 index 0000000000000000000000000000000000000000..a834e88c0b4e6dae831b5eec3f138f9aae3764a8 --- /dev/null +++ b/substrate/primitives/runtime/src/traits/transaction_extension/as_transaction_extension.rs @@ -0,0 +1,133 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The [AsTransactionExtension] adapter struct for adapting [SignedExtension]s to +//! [TransactionExtension]s. + +#![allow(deprecated)] + +use scale_info::TypeInfo; +use sp_core::RuntimeDebug; + +use crate::{ + traits::{AsSystemOriginSigner, SignedExtension, ValidateResult}, + InvalidTransaction, +}; + +use super::*; + +/// Adapter to use a `SignedExtension` in the place of a `TransactionExtension`. +#[derive(TypeInfo, Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +#[deprecated = "Convert your SignedExtension to a TransactionExtension."] +pub struct AsTransactionExtension(pub SE); + +impl Default for AsTransactionExtension { + fn default() -> Self { + Self(SE::default()) + } +} + +impl From for AsTransactionExtension { + fn from(value: SE) -> Self { + Self(value) + } +} + +impl TransactionExtensionBase for AsTransactionExtension { + const IDENTIFIER: &'static str = SE::IDENTIFIER; + type Implicit = SE::AdditionalSigned; + + fn implicit(&self) -> Result { + self.0.additional_signed() + } + fn metadata() -> Vec { + SE::metadata() + } +} + +impl TransactionExtension + for AsTransactionExtension +where + ::RuntimeOrigin: AsSystemOriginSigner + Clone, +{ + type Val = (); + type Pre = SE::Pre; + + fn validate( + &self, + origin: ::RuntimeOrigin, + call: &SE::Call, + info: &DispatchInfoOf, + len: usize, + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> ValidateResult { + let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; + let r = self.0.validate(who, call, info, len)?; + Ok((r, (), origin)) + } + + fn prepare( + self, + _: (), + origin: &::RuntimeOrigin, + call: &SE::Call, + info: &DispatchInfoOf, + len: usize, + _context: &Context, + ) -> Result { + let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; + self.0.pre_dispatch(who, call, info, len) + } + + fn post_dispatch( + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + len: usize, + result: &DispatchResult, + _context: &Context, + ) -> Result<(), TransactionValidityError> { + SE::post_dispatch(Some(pre), info, post_info, len, result) + } + + fn validate_bare_compat( + call: &SE::Call, + info: &DispatchInfoOf, + len: usize, + ) -> TransactionValidity { + SE::validate_unsigned(call, info, len) + } + + fn pre_dispatch_bare_compat( + call: &SE::Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result<(), TransactionValidityError> { + SE::pre_dispatch_unsigned(call, info, len) + } + + fn post_dispatch_bare_compat( + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + len: usize, + result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + SE::post_dispatch(None, info, post_info, len, result) + } +} diff --git a/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs b/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs new file mode 100644 index 0000000000000000000000000000000000000000..8efa586aa0e77f60b5671bb868438d7aa7bdaf3d --- /dev/null +++ 
b/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs @@ -0,0 +1,141 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The [DispatchTransaction] trait. + +use super::*; + +/// Single-function utility trait with a blanket impl over [TransactionExtension] in order to +/// provide transaction dispatching functionality. We avoid implementing this directly on the trait +/// since we never want it to be overridden by the trait implementation. +pub trait DispatchTransaction { + /// The origin type of the transaction. + type Origin; + /// The info type. + type Info; + /// The resultant type. + type Result; + /// The `Val` of the extension. + type Val; + /// The `Pre` of the extension. + type Pre; + /// Just validate a transaction. + /// + /// This is basically the same as [validate](TransactionExtension::validate), except that there + /// is no need to supply the bond data. + fn validate_only( + &self, + origin: Self::Origin, + call: &Call, + info: &Self::Info, + len: usize, + ) -> Result<(ValidTransaction, Self::Val, Self::Origin), TransactionValidityError>; + /// Prepare and validate a transaction, ready for dispatch. + fn validate_and_prepare( + self, + origin: Self::Origin, + call: &Call, + info: &Self::Info, + len: usize, + ) -> Result<(Self::Pre, Self::Origin), TransactionValidityError>; + /// Dispatch a transaction with the given base origin and call. + fn dispatch_transaction( + self, + origin: Self::Origin, + call: Call, + info: &Self::Info, + len: usize, + ) -> Self::Result; + /// Do everything which would be done in a [dispatch_transaction](Self::dispatch_transaction), + /// but instead of executing the call, execute `substitute`. Since this doesn't actually + /// dispatch the call, it doesn't need to consume it and so `call` can be passed as a reference.
+ fn test_run( + self, + origin: Self::Origin, + call: &Call, + info: &Self::Info, + len: usize, + substitute: impl FnOnce( + Self::Origin, + ) -> crate::DispatchResultWithInfo<::PostInfo>, + ) -> Self::Result; +} + +impl, Call: Dispatchable + Encode> DispatchTransaction + for T +{ + type Origin = ::RuntimeOrigin; + type Info = DispatchInfoOf; + type Result = crate::ApplyExtrinsicResultWithInfo>; + type Val = T::Val; + type Pre = T::Pre; + + fn validate_only( + &self, + origin: Self::Origin, + call: &Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result<(ValidTransaction, T::Val, Self::Origin), TransactionValidityError> { + self.validate(origin, call, info, len, &mut (), self.implicit()?, call) + } + fn validate_and_prepare( + self, + origin: Self::Origin, + call: &Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result<(T::Pre, Self::Origin), TransactionValidityError> { + let (_, val, origin) = self.validate_only(origin, call, info, len)?; + let pre = self.prepare(val, &origin, &call, info, len, &())?; + Ok((pre, origin)) + } + fn dispatch_transaction( + self, + origin: ::RuntimeOrigin, + call: Call, + info: &DispatchInfoOf, + len: usize, + ) -> Self::Result { + let (pre, origin) = self.validate_and_prepare(origin, &call, info, len)?; + let res = call.dispatch(origin); + let post_info = res.unwrap_or_else(|err| err.post_info); + let pd_res = res.map(|_| ()).map_err(|e| e.error); + T::post_dispatch(pre, info, &post_info, len, &pd_res, &())?; + Ok(res) + } + fn test_run( + self, + origin: Self::Origin, + call: &Call, + info: &Self::Info, + len: usize, + substitute: impl FnOnce( + Self::Origin, + ) -> crate::DispatchResultWithInfo<::PostInfo>, + ) -> Self::Result { + let (pre, origin) = self.validate_and_prepare(origin, &call, info, len)?; + let res = substitute(origin); + let post_info = match res { + Ok(info) => info, + Err(err) => err.post_info, + }; + let pd_res = res.map(|_| ()).map_err(|e| e.error); + T::post_dispatch(pre, info, &post_info, len, &pd_res, &())?; + Ok(res) + } +} diff --git a/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs b/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..69a0ba18adb72a67951d8548724182060916e0ee --- /dev/null +++ b/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs @@ -0,0 +1,526 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The transaction extension trait. 
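To make the flow in `dispatch_transaction.rs` above concrete: the blanket impl chains `validate`, `prepare`, the call's dispatch and `post_dispatch`. A hedged sketch of driving it by hand with the unit extension `()` and the `MockCallU64` test call, both part of this diff (the `demo` function and the literal values are illustrative):

```rust
use sp_runtime::{
    testing::MockCallU64,
    traits::DispatchTransaction,
    transaction_validity::TransactionValidityError,
};

// `()` is the no-op transaction extension, so the blanket `DispatchTransaction` impl
// applies to it for any `Dispatchable + Encode` call such as `MockCallU64`.
fn demo() -> Result<(), TransactionValidityError> {
    let call = MockCallU64(7);
    let info = (); // `MockCallU64::Info` is `()`
    let len = 0usize;

    // Pool-side: only validate, keeping the (possibly remapped) origin and the `Val` token.
    let (_valid, _val, origin) = ().validate_only(13u64, &call, &info, len)?;

    // Block-side: validate, prepare, dispatch and post-dispatch in one go.
    let _dispatch_result = ().dispatch_transaction(origin, call, &info, len)?;
    Ok(())
}
```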
+ +use crate::{ + scale_info::{MetaType, StaticTypeInfo}, + transaction_validity::{TransactionValidity, TransactionValidityError, ValidTransaction}, + DispatchResult, +}; +use codec::{Codec, Decode, Encode}; +use impl_trait_for_tuples::impl_for_tuples; +#[doc(hidden)] +pub use sp_std::marker::PhantomData; +use sp_std::{self, fmt::Debug, prelude::*}; +use sp_weights::Weight; +use tuplex::{PopFront, PushBack}; + +use super::{DispatchInfoOf, Dispatchable, OriginOf, PostDispatchInfoOf}; + +mod as_transaction_extension; +mod dispatch_transaction; +#[allow(deprecated)] +pub use as_transaction_extension::AsTransactionExtension; +pub use dispatch_transaction::DispatchTransaction; + +/// Shortcut for the result value of the `validate` function. +pub type ValidateResult = + Result<(ValidTransaction, Val, OriginOf), TransactionValidityError>; + +/// Simple blanket implementation trait to denote the bounds of a type which can be contained within +/// a [`TransactionExtension`]. +pub trait TransactionExtensionInterior: + Codec + Debug + Sync + Send + Clone + Eq + PartialEq + StaticTypeInfo +{ +} +impl + TransactionExtensionInterior for T +{ +} + +/// Base for [TransactionExtension]s; this contains the associated types and does not require any +/// generic parameterization. +pub trait TransactionExtensionBase: TransactionExtensionInterior { + /// Unique identifier of this signed extension. + /// + /// This will be exposed in the metadata to identify the signed extension used in an extrinsic. + const IDENTIFIER: &'static str; + + /// Any additional data which was known at the time of transaction construction and can be + /// useful in authenticating the transaction. This is determined dynamically in part from the + /// on-chain environment using the `implicit` function and not directly contained in the + /// transaction itself and therefore is considered "implicit". + type Implicit: Codec + StaticTypeInfo; + + /// Determine any additional data which was known at the time of transaction construction and + /// can be useful in authenticating the transaction. The expected usage of this is to include in + /// any data which is signed and verified as part of transactiob validation. Also perform any + /// pre-signature-verification checks and return an error if needed. + fn implicit(&self) -> Result { + use crate::InvalidTransaction::IndeterminateImplicit; + Ok(Self::Implicit::decode(&mut &[][..]).map_err(|_| IndeterminateImplicit)?) + } + + /// The weight consumed by executing this extension instance fully during transaction dispatch. + fn weight(&self) -> Weight { + Weight::zero() + } + + /// Returns the metadata for this extension. + /// + /// As a [`TransactionExtension`] can be a tuple of [`TransactionExtension`]s we need to return + /// a `Vec` that holds the metadata of each one. Each individual `TransactionExtension` must + /// return *exactly* one [`TransactionExtensionMetadata`]. + /// + /// This method provides a default implementation that returns a vec containing a single + /// [`TransactionExtensionMetadata`]. + fn metadata() -> Vec { + sp_std::vec![TransactionExtensionMetadata { + identifier: Self::IDENTIFIER, + ty: scale_info::meta_type::(), + // TODO: Metadata-v16: Rename to "implicit" + additional_signed: scale_info::meta_type::() + }] + } +} + +/// Means by which a transaction may be extended. This type embodies both the data and the logic +/// that should be additionally associated with the transaction. It should be plain old data. 
+/// +/// The simplest transaction extension would be the Unit type (and empty pipeline) `()`. This +/// executes no additional logic and implies a dispatch of the transaction's call using the +/// inherited origin (either `None` or `Signed`, depending on whether this is a signed or general +/// transaction). +/// +/// Transaction extensions are capable of altering certain associated semantics: +/// +/// - They may define the origin with which the transaction's call should be dispatched. +/// - They may define various parameters used by the transaction queue to determine under what +/// conditions the transaction should be retained and introduced on-chain. +/// - They may define whether this transaction is acceptable for introduction on-chain at all. +/// +/// Each of these semantics is defined by the `validate` function. +/// +/// **NOTE: Transaction extensions cannot under any circumstances alter the call itself.** +/// +/// Transaction extensions are capable of defining logic which is executed in addition to the +/// dispatch of the call: +/// +/// - They may define logic which must be executed prior to the dispatch of the call. +/// - They may also define logic which must be executed after the dispatch of the call. +/// +/// Each of these semantics is defined by the `prepare` and `post_dispatch` functions respectively. +/// +/// Finally, transaction extensions may define additional data to help define the implications of +/// the logic they introduce. This additional data may be explicitly defined by the transaction +/// author (in which case it is included as part of the transaction body), or it may be implicitly +/// defined by the transaction extension based around the on-chain state (which the transaction +/// author is assumed to know). This data may be utilized by the above logic to alter how a node's +/// transaction queue treats this transaction. +/// +/// ## Default implementations +/// +/// Of the 5 functions in this trait, 3 of them must return a value of an associated type on +/// success, and none of these types implement [Default] or anything like it. This means that +/// default implementations cannot be provided for these functions. However, a macro is provided +/// [impl_tx_ext_default](crate::impl_tx_ext_default) which is capable of generating default +/// implementations for each of these 3 functions. If you do not wish to introduce additional logic +/// into the transaction pipeline, then it is recommended that you use this macro to implement these +/// functions. +/// +/// ## Pipelines, Inherited Implications, and Authorized Origins +/// +/// Requiring a single transaction extension to define all of the above semantics would be +/// cumbersome and would lead to a lot of boilerplate. Instead, transaction extensions are +/// aggregated into pipelines, which are tuples of transaction extensions. Each extension in the +/// pipeline is executed in order, and the output of each extension is aggregated and/or relayed as +/// the input to the next extension in the pipeline. +/// +/// This ordered composition happens with all datatypes ([Val](TransactionExtension::Val), +/// [Pre](TransactionExtension::Pre) and [Implicit](TransactionExtensionBase::Implicit)) as well as +/// all functions. There are important consequences stemming from how the composition affects the +/// meaning of the `origin` and `implication` parameters as well as the results.
Whereas the +/// [prepare](TransactionExtension::prepare) and +/// [post_dispatch](TransactionExtension::post_dispatch) functions are clear in their meaning, the +/// [validate](TransactionExtension::validate) function is fairly sophisticated and warrants +/// further explanation. +/// +/// Firstly, the `origin` parameter. The `origin` passed into the first item in a pipeline is simply +/// the one passed into the tuple itself. It represents an authority who has authorized the implication +/// of the transaction, as of the extension it has been passed into *and any further extensions it +/// may pass through, all the way to, and including, the transaction's dispatch call itself*. Each +/// following item in the pipeline is passed the origin which the previous item returned. The origin +/// returned from the final item in the pipeline is the origin which is returned by the tuple +/// itself. +/// +/// This means that if a constituent extension returns a different origin to the one it was called +/// with, then (assuming no other extension changes it further) *this new origin will be used for +/// all extensions following it in the pipeline, and will be returned from the pipeline to be used +/// as the origin for the call's dispatch*. The call itself as well as all these extensions +/// following may each imply consequences for this origin. We call this the *inherited implication*. +/// +/// The *inherited implication* is the cumulative on-chain effect borne by whatever origin is +/// returned. It is expressed to the [validate](TransactionExtension::validate) function only as the +/// `implication` argument which implements the [Encode] trait. A transaction extension may define +/// its own implications through its own fields and the +/// [implicit](TransactionExtensionBase::implicit) function. This is only utilized by extensions +/// which precede it in a pipeline or, if the transaction is an old-school signed transaction, the +/// underlying transaction verification logic. +/// +/// **The inherited implication passed as the `implication` parameter to +/// [validate](TransactionExtension::validate) does not include the extension's inner data itself +/// nor does it include the result of the extension's `implicit` function.** If you both provide an +/// implication and rely on the implication, then you need to manually aggregate your extension's +/// implication with the aggregated implication passed in. +pub trait TransactionExtension: TransactionExtensionBase { + /// The type that encodes information that can be passed from validate to prepare. + type Val; + + /// The type that encodes information that can be passed from prepare to post-dispatch. + type Pre; + + /// Validate a transaction for the transaction queue. + /// + /// This function can be called frequently by the transaction queue to obtain transaction + /// validity against current state. It should perform all checks that determine a valid + /// transaction that can pay for its execution, and quickly eliminate ones that are stale or + /// incorrect. + /// + /// Parameters: + /// - `origin`: The origin of the transaction which this extension inherited; coming from an + /// "old-school" *signed transaction*, this will be a system `RawOrigin::Signed` value. If the + /// transaction is a "new-school" *General Transaction*, then this will be a system + /// `RawOrigin::None` value.
If this extension is an item in a composite, then it could be + /// anything which was previously returned as an `origin` value in the result of a `validate` + /// call. + /// - `call`: The `Call` wrapped by this extension. + /// - `info`: Information concerning, and inherent to, the transaction's call. + /// - `len`: The total length of the encoded transaction. + /// - `inherited_implication`: The *implication* which this extension inherits. This is a tuple + /// of the transaction's call and some additional opaque-but-encodable data. Coming directly + /// from a transaction, the latter is [()]. However, if this extension is expressed as part of + /// a composite type, then the latter component is equal to any further implications to which + /// the returned `origin` could potentially apply. See Pipelines, Inherited Implications, and + /// Authorized Origins for more information. + /// - `context`: Some opaque mutable context, as yet unused. + /// + /// Returns a [ValidateResult], which is a [Result] whose success type is a tuple of + /// [ValidTransaction] (defining useful metadata for the transaction queue), the [Self::Val] + /// token of this transaction, which gets passed into [prepare](TransactionExtension::prepare), + /// and the origin of the transaction, which gets passed into + /// [prepare](TransactionExtension::prepare) and is ultimately used for dispatch. + fn validate( + &self, + origin: OriginOf, + call: &Call, + info: &DispatchInfoOf, + len: usize, + context: &mut Context, + self_implicit: Self::Implicit, + inherited_implication: &impl Encode, + ) -> ValidateResult; + + /// Do any pre-flight stuff for a transaction after validation. + /// + /// This is for actions which do not happen in the transaction queue but only immediately prior + /// to the point of dispatch on-chain. This should not return an error, since errors should + /// already have been identified during the [validate](TransactionExtension::validate) call. If + /// an error is returned, the transaction will be considered invalid but no state changes will + /// happen and therefore work done in [validate](TransactionExtension::validate) will not be + /// paid for. + /// + /// Unlike `validate`, this function may consume `self`. + /// + /// Parameters: + /// - `val`: `Self::Val` returned by the result of the `validate` call. + /// - `origin`: The origin returned by the result of the `validate` call. + /// - `call`: The `Call` wrapped by this extension. + /// - `info`: Information concerning, and inherent to, the transaction's call. + /// - `len`: The total length of the encoded transaction. + /// - `context`: Some opaque mutable context, as yet unused. + /// + /// Returns a [Self::Pre] value on success, which gets passed into + /// [post_dispatch](TransactionExtension::post_dispatch) and after the call is dispatched. + /// + /// IMPORTANT: **Checks made in validation need not be repeated here.** + fn prepare( + self, + val: Self::Val, + origin: &OriginOf, + call: &Call, + info: &DispatchInfoOf, + len: usize, + context: &Context, + ) -> Result; + + /// Do any post-flight stuff for an extrinsic. + /// + /// `_pre` contains the output of `prepare`. + /// + /// This gets given the `DispatchResult` `_result` from the extrinsic and can, if desired, + /// introduce a `TransactionValidityError`, causing the block to become invalid for including + /// it. + /// + /// Parameters: + /// - `pre`: `Self::Pre` returned by the result of the `prepare` call prior to dispatch. 
+ /// - `info`: Information concerning, and inherent to, the transaction's call. + /// - `post_info`: Information concerning the dispatch of the transaction's call. + /// - `len`: The total length of the encoded transaction. + /// - `result`: The result of the dispatch. + /// - `context`: Some opaque mutable context, as yet unused. + /// + /// WARNING: It is dangerous to return an error here. To do so will fundamentally invalidate the + /// transaction and any block that it is included in, causing the block author to not be + /// compensated for their work in validating the transaction or producing the block so far. It + /// can only be used safely when you *know* that the transaction is one that would only be + /// introduced by the current block author. + fn post_dispatch( + _pre: Self::Pre, + _info: &DispatchInfoOf, + _post_info: &PostDispatchInfoOf, + _len: usize, + _result: &DispatchResult, + _context: &Context, + ) -> Result<(), TransactionValidityError> { + Ok(()) + } + + /// Compatibility function for supporting the `SignedExtension::validate_unsigned` function. + /// + /// DO NOT USE! THIS MAY BE REMOVED AT ANY TIME! + #[deprecated = "Only for compatibility. DO NOT USE."] + fn validate_bare_compat( + _call: &Call, + _info: &DispatchInfoOf, + _len: usize, + ) -> TransactionValidity { + Ok(ValidTransaction::default()) + } + + /// Compatibility function for supporting the `SignedExtension::pre_dispatch_unsigned` function. + /// + /// DO NOT USE! THIS MAY BE REMOVED AT ANY TIME! + #[deprecated = "Only for compatibility. DO NOT USE."] + fn pre_dispatch_bare_compat( + _call: &Call, + _info: &DispatchInfoOf, + _len: usize, + ) -> Result<(), TransactionValidityError> { + Ok(()) + } + + /// Compatibility function for supporting the `SignedExtension::post_dispatch` function where + /// `pre` is `None`. + /// + /// DO NOT USE! THIS MAY BE REMOVED AT ANY TIME! + #[deprecated = "Only for compatibility. DO NOT USE."] + fn post_dispatch_bare_compat( + _info: &DispatchInfoOf, + _post_info: &PostDispatchInfoOf, + _len: usize, + _result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + Ok(()) + } +} + +/// Implict +#[macro_export] +macro_rules! impl_tx_ext_default { + ($call:ty ; $context:ty ; , $( $rest:tt )*) => { + impl_tx_ext_default!{$call ; $context ; $( $rest )*} + }; + ($call:ty ; $context:ty ; validate $( $rest:tt )*) => { + fn validate( + &self, + origin: $crate::traits::OriginOf<$call>, + _call: &$call, + _info: &$crate::traits::DispatchInfoOf<$call>, + _len: usize, + _context: &mut $context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl $crate::codec::Encode, + ) -> $crate::traits::ValidateResult { + Ok((Default::default(), Default::default(), origin)) + } + impl_tx_ext_default!{$call ; $context ; $( $rest )*} + }; + ($call:ty ; $context:ty ; prepare $( $rest:tt )*) => { + fn prepare( + self, + _val: Self::Val, + _origin: &$crate::traits::OriginOf<$call>, + _call: &$call, + _info: &$crate::traits::DispatchInfoOf<$call>, + _len: usize, + _context: & $context, + ) -> Result { + Ok(Default::default()) + } + impl_tx_ext_default!{$call ; $context ; $( $rest )*} + }; + ($call:ty ; $context:ty ;) => {}; +} + +/// Information about a [`TransactionExtension`] for the runtime metadata. +pub struct TransactionExtensionMetadata { + /// The unique identifier of the [`TransactionExtension`]. + pub identifier: &'static str, + /// The type of the [`TransactionExtension`]. 
+ pub ty: MetaType, + /// The type of the [`TransactionExtension`] additional signed data for the payload. + // TODO: Rename "implicit" + pub additional_signed: MetaType, +} + +#[impl_for_tuples(1, 12)] +impl TransactionExtensionBase for Tuple { + for_tuples!( where #( Tuple: TransactionExtensionBase )* ); + const IDENTIFIER: &'static str = "Use `metadata()`!"; + for_tuples!( type Implicit = ( #( Tuple::Implicit ),* ); ); + fn implicit(&self) -> Result { + Ok(for_tuples!( ( #( Tuple.implicit()? ),* ) )) + } + fn weight(&self) -> Weight { + let mut weight = Weight::zero(); + for_tuples!( #( weight += Tuple.weight(); )* ); + weight + } + fn metadata() -> Vec { + let mut ids = Vec::new(); + for_tuples!( #( ids.extend(Tuple::metadata()); )* ); + ids + } +} + +#[impl_for_tuples(1, 12)] +impl TransactionExtension for Tuple { + for_tuples!( where #( Tuple: TransactionExtension )* ); + for_tuples!( type Val = ( #( Tuple::Val ),* ); ); + for_tuples!( type Pre = ( #( Tuple::Pre ),* ); ); + + fn validate( + &self, + origin: ::RuntimeOrigin, + call: &Call, + info: &DispatchInfoOf, + len: usize, + context: &mut Context, + self_implicit: Self::Implicit, + inherited_implication: &impl Encode, + ) -> Result< + (ValidTransaction, Self::Val, ::RuntimeOrigin), + TransactionValidityError, + > { + let valid = ValidTransaction::default(); + let val = (); + let following_explicit_implications = for_tuples!( ( #( &self.Tuple ),* ) ); + let following_implicit_implications = self_implicit; + + for_tuples!(#( + // Implication of this pipeline element not relevant for later items, so we pop it. + let (_item, following_explicit_implications) = following_explicit_implications.pop_front(); + let (item_implicit, following_implicit_implications) = following_implicit_implications.pop_front(); + let (item_valid, item_val, origin) = { + let implications = ( + // The first is the implications born of the fact we return the mutated + // origin. + inherited_implication, + // This is the explicitly made implication born of the fact the new origin is + // passed into the next items in this pipeline-tuple. + &following_explicit_implications, + // This is the implicitly made implication born of the fact the new origin is + // passed into the next items in this pipeline-tuple. + &following_implicit_implications, + ); + Tuple.validate(origin, call, info, len, context, item_implicit, &implications)? + }; + let valid = valid.combine_with(item_valid); + let val = val.push_back(item_val); + )* ); + Ok((valid, val, origin)) + } + + fn prepare( + self, + val: Self::Val, + origin: &::RuntimeOrigin, + call: &Call, + info: &DispatchInfoOf, + len: usize, + context: &Context, + ) -> Result { + Ok(for_tuples!( ( #( + Tuple::prepare(self.Tuple, val.Tuple, origin, call, info, len, context)? 
+ ),* ) )) + } + + fn post_dispatch( + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + len: usize, + result: &DispatchResult, + context: &Context, + ) -> Result<(), TransactionValidityError> { + for_tuples!( #( Tuple::post_dispatch(pre.Tuple, info, post_info, len, result, context)?; )* ); + Ok(()) + } +} + +impl TransactionExtensionBase for () { + const IDENTIFIER: &'static str = "UnitTransactionExtension"; + type Implicit = (); + fn implicit(&self) -> sp_std::result::Result { + Ok(()) + } + fn weight(&self) -> Weight { + Weight::zero() + } +} + +impl TransactionExtension for () { + type Val = (); + type Pre = (); + fn validate( + &self, + origin: ::RuntimeOrigin, + _call: &Call, + _info: &DispatchInfoOf, + _len: usize, + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> Result< + (ValidTransaction, (), ::RuntimeOrigin), + TransactionValidityError, + > { + Ok((ValidTransaction::default(), (), origin)) + } + fn prepare( + self, + _val: (), + _origin: &::RuntimeOrigin, + _call: &Call, + _info: &DispatchInfoOf, + _len: usize, + _context: &Context, + ) -> Result<(), TransactionValidityError> { + Ok(()) + } +} diff --git a/substrate/primitives/runtime/src/transaction_validity.rs b/substrate/primitives/runtime/src/transaction_validity.rs index 836948493823cb4d137ce90721788e9a78a8ac99..24124128083c0e48ef0b7cd6cf590ecf1c6bb7cb 100644 --- a/substrate/primitives/runtime/src/transaction_validity.rs +++ b/substrate/primitives/runtime/src/transaction_validity.rs @@ -82,6 +82,8 @@ pub enum InvalidTransaction { MandatoryValidation, /// The sending address is disabled or known to be invalid. BadSigner, + /// The implicit data was unable to be calculated. + IndeterminateImplicit, } impl InvalidTransaction { @@ -113,6 +115,8 @@ impl From for &'static str { "Transaction dispatch is mandatory; transactions must not be validated.", InvalidTransaction::Custom(_) => "InvalidTransaction custom error", InvalidTransaction::BadSigner => "Invalid signing address", + InvalidTransaction::IndeterminateImplicit => + "The implicit data was unable to be calculated", } } } @@ -338,7 +342,7 @@ pub struct ValidTransactionBuilder { impl ValidTransactionBuilder { /// Set the priority of a transaction. /// - /// Note that the final priority for `FRAME` is combined from all `SignedExtension`s. + /// Note that the final priority for `FRAME` is combined from all `TransactionExtension`s. /// Most likely for unsigned transactions you want the priority to be higher /// than for regular transactions. We recommend exposing a base priority for unsigned /// transactions as a runtime module parameter, so that the runtime can tune inter-module diff --git a/substrate/primitives/weights/src/lib.rs b/substrate/primitives/weights/src/lib.rs index aede9473535a361325f64c177dd9f1861664ca40..b2c956266e2c3a346ca32c7b74ceb7f4c5cc2c0c 100644 --- a/substrate/primitives/weights/src/lib.rs +++ b/substrate/primitives/weights/src/lib.rs @@ -18,9 +18,6 @@ //! # Primitives for transaction weighting. #![cfg_attr(not(feature = "std"), no_std)] -// TODO remove once `OldWeight` is gone. I dont know why this is needed, maybe by one of the macros -// of `OldWeight`. 
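As a rough illustration of the `TransactionExtension` API documented above, a do-nothing extension could look as follows. This is a hedged sketch against the trait shape in this diff, not code from the PR: the `NoopExtension` name is invented, and the import paths are assumed to be the `sp_runtime` re-exports used elsewhere in this patch.

```rust
use codec::{Decode, Encode};
use scale_info::TypeInfo;
use sp_runtime::{
	impl_tx_ext_default,
	traits::{Dispatchable, TransactionExtension, TransactionExtensionBase},
};

/// A transaction extension that checks nothing and attaches no implicit data.
#[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, TypeInfo)]
pub struct NoopExtension;

impl TransactionExtensionBase for NoopExtension {
	const IDENTIFIER: &'static str = "NoopExtension";
	// Nothing extra is folded into the signed payload.
	type Implicit = ();
}

impl<Call: Dispatchable, Context> TransactionExtension<Call, Context> for NoopExtension {
	type Val = ();
	type Pre = ();
	// Default `validate` (always valid, origin passed through unchanged) and `prepare` (no-op),
	// generated by the macro introduced in this file.
	impl_tx_ext_default!(Call; Context; validate prepare);
}
```

A pipeline is then simply a tuple such as `(NoopExtension, SomeOtherExtension)`, whose `validate`, `prepare` and `post_dispatch` run left to right exactly as in the tuple implementation above.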
-#![allow(deprecated)] extern crate self as sp_weights; @@ -28,7 +25,7 @@ mod weight_meter; mod weight_v2; use bounded_collections::Get; -use codec::{CompactAs, Decode, Encode, MaxEncodedLen}; +use codec::{Decode, Encode}; use scale_info::TypeInfo; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -52,28 +49,6 @@ pub mod constants { pub const WEIGHT_PROOF_SIZE_PER_KB: u64 = 1024; } -/// The old weight type. -/// -/// NOTE: This type exists purely for compatibility purposes! Use [`weight_v2::Weight`] in all other -/// cases. -#[derive( - Decode, - Encode, - CompactAs, - PartialEq, - Eq, - Clone, - Copy, - RuntimeDebug, - Default, - MaxEncodedLen, - TypeInfo, -)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[cfg_attr(feature = "serde", serde(transparent))] -#[deprecated(note = "Will be removed soon; use `Weight` instead.")] -pub struct OldWeight(pub u64); - /// The weight of database operations that the runtime can invoke. /// /// NOTE: This is currently only measured in computational time, and will probably diff --git a/substrate/primitives/weights/src/weight_meter.rs b/substrate/primitives/weights/src/weight_meter.rs index 1738948e4c3c36ad5744ee3469589d7167306178..cfe8396ae6d67ed36d038f6afc2a7700c56b815a 100644 --- a/substrate/primitives/weights/src/weight_meter.rs +++ b/substrate/primitives/weights/src/weight_meter.rs @@ -118,13 +118,6 @@ impl WeightMeter { debug_assert!(self.consumed.all_lte(self.limit), "Weight counter overflow"); } - /// Consume the given weight after checking that it can be consumed and return `true`. Otherwise - /// do nothing and return `false`. - #[deprecated(note = "Use `try_consume` instead. Will be removed after December 2023.")] - pub fn check_accrue(&mut self, w: Weight) -> bool { - self.try_consume(w).is_ok() - } - /// Consume the given weight after checking that it can be consumed. /// /// Returns `Ok` if the weight can be consumed or otherwise an `Err`. @@ -139,12 +132,6 @@ impl WeightMeter { }) } - /// Check if the given weight can be consumed. - #[deprecated(note = "Use `can_consume` instead. Will be removed after December 2023.")] - pub fn can_accrue(&self, w: Weight) -> bool { - self.can_consume(w) - } - /// Check if the given weight can be consumed. 
pub fn can_consume(&self, w: Weight) -> bool { self.consumed.checked_add(&w).map_or(false, |t| t.all_lte(self.limit)) @@ -165,80 +152,80 @@ mod tests { fn weight_meter_remaining_works() { let mut meter = WeightMeter::with_limit(Weight::from_parts(10, 20)); - assert!(meter.check_accrue(Weight::from_parts(5, 0))); + assert_eq!(meter.try_consume(Weight::from_parts(5, 0)), Ok(())); assert_eq!(meter.consumed, Weight::from_parts(5, 0)); assert_eq!(meter.remaining(), Weight::from_parts(5, 20)); - assert!(meter.check_accrue(Weight::from_parts(2, 10))); + assert_eq!(meter.try_consume(Weight::from_parts(2, 10)), Ok(())); assert_eq!(meter.consumed, Weight::from_parts(7, 10)); assert_eq!(meter.remaining(), Weight::from_parts(3, 10)); - assert!(meter.check_accrue(Weight::from_parts(3, 10))); + assert_eq!(meter.try_consume(Weight::from_parts(3, 10)), Ok(())); assert_eq!(meter.consumed, Weight::from_parts(10, 20)); assert_eq!(meter.remaining(), Weight::from_parts(0, 0)); } #[test] - fn weight_meter_can_accrue_works() { + fn weight_meter_can_consume_works() { let meter = WeightMeter::with_limit(Weight::from_parts(1, 1)); - assert!(meter.can_accrue(Weight::from_parts(0, 0))); - assert!(meter.can_accrue(Weight::from_parts(1, 1))); - assert!(!meter.can_accrue(Weight::from_parts(0, 2))); - assert!(!meter.can_accrue(Weight::from_parts(2, 0))); - assert!(!meter.can_accrue(Weight::from_parts(2, 2))); + assert!(meter.can_consume(Weight::from_parts(0, 0))); + assert!(meter.can_consume(Weight::from_parts(1, 1))); + assert!(!meter.can_consume(Weight::from_parts(0, 2))); + assert!(!meter.can_consume(Weight::from_parts(2, 0))); + assert!(!meter.can_consume(Weight::from_parts(2, 2))); } #[test] - fn weight_meter_check_accrue_works() { + fn weight_meter_try_consume_works() { let mut meter = WeightMeter::with_limit(Weight::from_parts(2, 2)); - assert!(meter.check_accrue(Weight::from_parts(0, 0))); - assert!(meter.check_accrue(Weight::from_parts(1, 1))); - assert!(!meter.check_accrue(Weight::from_parts(0, 2))); - assert!(!meter.check_accrue(Weight::from_parts(2, 0))); - assert!(!meter.check_accrue(Weight::from_parts(2, 2))); - assert!(meter.check_accrue(Weight::from_parts(0, 1))); - assert!(meter.check_accrue(Weight::from_parts(1, 0))); + assert_eq!(meter.try_consume(Weight::from_parts(0, 0)), Ok(())); + assert_eq!(meter.try_consume(Weight::from_parts(1, 1)), Ok(())); + assert_eq!(meter.try_consume(Weight::from_parts(0, 2)), Err(())); + assert_eq!(meter.try_consume(Weight::from_parts(2, 0)), Err(())); + assert_eq!(meter.try_consume(Weight::from_parts(2, 2)), Err(())); + assert_eq!(meter.try_consume(Weight::from_parts(0, 1)), Ok(())); + assert_eq!(meter.try_consume(Weight::from_parts(1, 0)), Ok(())); } #[test] - fn weight_meter_check_and_can_accrue_works() { + fn weight_meter_check_and_can_consume_works() { let mut meter = WeightMeter::new(); - assert!(meter.can_accrue(Weight::from_parts(u64::MAX, 0))); - assert!(meter.check_accrue(Weight::from_parts(u64::MAX, 0))); + assert!(meter.can_consume(Weight::from_parts(u64::MAX, 0))); + assert_eq!(meter.try_consume(Weight::from_parts(u64::MAX, 0)), Ok(())); - assert!(meter.can_accrue(Weight::from_parts(0, u64::MAX))); - assert!(meter.check_accrue(Weight::from_parts(0, u64::MAX))); + assert!(meter.can_consume(Weight::from_parts(0, u64::MAX))); + assert_eq!(meter.try_consume(Weight::from_parts(0, u64::MAX)), Ok(())); - assert!(!meter.can_accrue(Weight::from_parts(0, 1))); - assert!(!meter.check_accrue(Weight::from_parts(0, 1))); + assert!(!meter.can_consume(Weight::from_parts(0, 
1))); + assert_eq!(meter.try_consume(Weight::from_parts(0, 1)), Err(())); - assert!(!meter.can_accrue(Weight::from_parts(1, 0))); - assert!(!meter.check_accrue(Weight::from_parts(1, 0))); + assert!(!meter.can_consume(Weight::from_parts(1, 0))); + assert_eq!(meter.try_consume(Weight::from_parts(1, 0)), Err(())); - assert!(meter.can_accrue(Weight::zero())); - assert!(meter.check_accrue(Weight::zero())); + assert!(meter.can_consume(Weight::zero())); + assert_eq!(meter.try_consume(Weight::zero()), Ok(())); } #[test] fn consumed_ratio_works() { let mut meter = WeightMeter::with_limit(Weight::from_parts(10, 20)); - assert!(meter.check_accrue(Weight::from_parts(5, 0))); + assert_eq!(meter.try_consume(Weight::from_parts(5, 0)), Ok(())); assert_eq!(meter.consumed_ratio(), Perbill::from_percent(50)); - assert!(meter.check_accrue(Weight::from_parts(0, 12))); + assert_eq!(meter.try_consume(Weight::from_parts(0, 12)), Ok(())); assert_eq!(meter.consumed_ratio(), Perbill::from_percent(60)); - assert!(meter.check_accrue(Weight::from_parts(2, 0))); + assert_eq!(meter.try_consume(Weight::from_parts(2, 0)), Ok(())); assert_eq!(meter.consumed_ratio(), Perbill::from_percent(70)); - assert!(meter.check_accrue(Weight::from_parts(0, 4))); + assert_eq!(meter.try_consume(Weight::from_parts(0, 4)), Ok(())); assert_eq!(meter.consumed_ratio(), Perbill::from_percent(80)); - assert!(meter.check_accrue(Weight::from_parts(3, 0))); + assert_eq!(meter.try_consume(Weight::from_parts(3, 0)), Ok(())); assert_eq!(meter.consumed_ratio(), Perbill::from_percent(100)); - assert!(meter.check_accrue(Weight::from_parts(0, 4))); + assert_eq!(meter.try_consume(Weight::from_parts(0, 4)), Ok(())); assert_eq!(meter.consumed_ratio(), Perbill::from_percent(100)); } diff --git a/substrate/scripts/run_all_benchmarks.sh b/substrate/scripts/run_all_benchmarks.sh index 6dd7cede319f974a074d951d34ec3c970f02c32f..fe5f89a5b56ea109cee20274ce7c1454b72b7009 100755 --- a/substrate/scripts/run_all_benchmarks.sh +++ b/substrate/scripts/run_all_benchmarks.sh @@ -107,6 +107,19 @@ for PALLET in "${PALLETS[@]}"; do FOLDER="$(echo "${PALLET#*_}" | tr '_' '-')"; WEIGHT_FILE="./frame/${FOLDER}/src/weights.rs" + + # Special handling of custom weight paths. + if [ "$PALLET" == "frame_system_extensions" ] || [ "$PALLET" == "frame-system-extensions" ] + then + WEIGHT_FILE="./frame/system/src/extensions/weights.rs" + elif [ "$PALLET" == "pallet_asset_conversion_tx_payment" ] || [ "$PALLET" == "pallet-asset-conversion-tx-payment" ] + then + WEIGHT_FILE="./frame/transaction-payment/asset-conversion-tx-payment/src/weights.rs" + elif [ "$PALLET" == "pallet_asset_tx_payment" ] || [ "$PALLET" == "pallet-asset-tx-payment" ] + then + WEIGHT_FILE="./frame/transaction-payment/asset-tx-payment/src/weights.rs" + fi + echo "[+] Benchmarking $PALLET with weight file $WEIGHT_FILE"; OUTPUT=$( diff --git a/substrate/test-utils/runtime/src/extrinsic.rs b/substrate/test-utils/runtime/src/extrinsic.rs index 05ffb7db5d5b95d6d6dbadf5f48da563c04a4c59..7eb1839e97b68be8c36015bc8c7d3ef9b9620515 100644 --- a/substrate/test-utils/runtime/src/extrinsic.rs +++ b/substrate/test-utils/runtime/src/extrinsic.rs @@ -25,7 +25,7 @@ use codec::Encode; use frame_system::{CheckNonce, CheckWeight}; use sp_core::crypto::Pair as TraitPair; use sp_keyring::AccountKeyring; -use sp_runtime::{transaction_validity::TransactionPriority, Perbill}; +use sp_runtime::{generic::Preamble, transaction_validity::TransactionPriority, Perbill}; use sp_std::prelude::*; /// Transfer used in test substrate pallet. 
Extrinsic is created and signed using this data. @@ -66,11 +66,11 @@ impl TryFrom<&Extrinsic> for TransferData { match uxt { Extrinsic { function: RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest, value }), - signature: Some((from, _, (CheckNonce(nonce), ..))), + preamble: Preamble::Signed(from, _, ((CheckNonce(nonce), ..), ..)), } => Ok(TransferData { from: *from, to: *dest, amount: *value, nonce: *nonce }), Extrinsic { function: RuntimeCall::SubstrateTest(PalletCall::bench_call { transfer }), - signature: None, + preamble: Preamble::Bare, } => Ok(transfer.clone()), _ => Err(()), } @@ -190,18 +190,17 @@ impl ExtrinsicBuilder { /// Build `Extrinsic` using embedded parameters pub fn build(self) -> Extrinsic { if let Some(signer) = self.signer { - let extra = ( - CheckNonce::from(self.nonce.unwrap_or(0)), - CheckWeight::new(), + let tx_ext = ( + (CheckNonce::from(self.nonce.unwrap_or(0)), CheckWeight::new()), CheckSubstrateCall {}, ); let raw_payload = - SignedPayload::from_raw(self.function.clone(), extra.clone(), ((), (), ())); + SignedPayload::from_raw(self.function.clone(), tx_ext.clone(), (((), ()), ())); let signature = raw_payload.using_encoded(|e| signer.sign(e)); - Extrinsic::new_signed(self.function, signer.public(), signature, extra) + Extrinsic::new_signed(self.function, signer.public(), signature, tx_ext) } else { - Extrinsic::new_unsigned(self.function) + Extrinsic::new_bare(self.function) } } } diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs index db9ff187b707fa66c70be24844297b7f74844e4a..7ec5f6722c4dde6a6aa943154ac5aa9fb9062bd1 100644 --- a/substrate/test-utils/runtime/src/lib.rs +++ b/substrate/test-utils/runtime/src/lib.rs @@ -58,10 +58,12 @@ use sp_api::{decl_runtime_apis, impl_runtime_apis}; pub use sp_core::hash::H256; use sp_inherents::{CheckInherentsResult, InherentData}; use sp_runtime::{ - create_runtime_str, impl_opaque_keys, - traits::{BlakeTwo256, Block as BlockT, DispatchInfoOf, NumberFor, Verify}, - transaction_validity::{TransactionSource, TransactionValidity, TransactionValidityError}, - ApplyExtrinsicResult, Perbill, + create_runtime_str, impl_opaque_keys, impl_tx_ext_default, + traits::{BlakeTwo256, Block as BlockT, DispatchInfoOf, Dispatchable, NumberFor, Verify}, + transaction_validity::{ + TransactionSource, TransactionValidity, TransactionValidityError, ValidTransaction, + }, + ApplyExtrinsicResult, ExtrinsicInclusionMode, Perbill, }; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; @@ -142,13 +144,14 @@ pub type Signature = sr25519::Signature; #[cfg(feature = "std")] pub type Pair = sp_core::sr25519::Pair; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = (CheckNonce, CheckWeight, CheckSubstrateCall); +// TODO: Remove after the Checks are migrated to TxExtension. +/// The extension to the basic transaction logic. +pub type TxExtension = ((CheckNonce, CheckWeight), CheckSubstrateCall); /// The payload being signed in transactions. -pub type SignedPayload = sp_runtime::generic::SignedPayload; +pub type SignedPayload = sp_runtime::generic::SignedPayload; /// Unchecked extrinsic type as expected by this runtime. pub type Extrinsic = - sp_runtime::generic::UncheckedExtrinsic; + sp_runtime::generic::UncheckedExtrinsic; /// An identifier for an account on this system. 
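The `(((), ()), ())` value passed to `SignedPayload::from_raw` above is not arbitrary: for a tuple extension, `Implicit` is the tuple of its members' `Implicit` types, so it mirrors the nesting of `TxExtension`. A small sketch of that correspondence, written against the types of this test runtime (the helper function is purely a compile-time shape check and is not part of the PR):

```rust
use frame_system::{CheckNonce, CheckWeight};
use sp_runtime::traits::TransactionExtensionBase;

// The alias defined in this file, with the generics written out for clarity.
type TxExtension = ((CheckNonce<Runtime>, CheckWeight<Runtime>), CheckSubstrateCall);

// Each member contributes `()` as its implicit, so the tuple's implicit mirrors the nesting
// and is exactly the `(((), ()), ())` literal used by `ExtrinsicBuilder::build`.
fn _implicit_shape(implicit: <TxExtension as TransactionExtensionBase>::Implicit) -> (((), ()), ()) {
	implicit
}
```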
pub type AccountId = ::Signer; @@ -243,7 +246,7 @@ impl sp_runtime::traits::Printable for CheckSubstrateCall { } impl sp_runtime::traits::Dispatchable for CheckSubstrateCall { - type RuntimeOrigin = CheckSubstrateCall; + type RuntimeOrigin = RuntimeOrigin; type Config = CheckSubstrateCall; type Info = CheckSubstrateCall; type PostInfo = CheckSubstrateCall; @@ -256,42 +259,37 @@ impl sp_runtime::traits::Dispatchable for CheckSubstrateCall { } } -impl sp_runtime::traits::SignedExtension for CheckSubstrateCall { - type AccountId = AccountId; - type Call = RuntimeCall; - type AdditionalSigned = (); - type Pre = (); +impl sp_runtime::traits::TransactionExtensionBase for CheckSubstrateCall { const IDENTIFIER: &'static str = "CheckSubstrateCall"; - - fn additional_signed( - &self, - ) -> sp_std::result::Result { - Ok(()) - } + type Implicit = (); +} +impl sp_runtime::traits::TransactionExtension + for CheckSubstrateCall +{ + type Pre = (); + type Val = (); + impl_tx_ext_default!(RuntimeCall; Context; prepare); fn validate( &self, - _who: &Self::AccountId, - call: &Self::Call, - _info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + call: &RuntimeCall, + _info: &DispatchInfoOf, _len: usize, - ) -> TransactionValidity { + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> Result< + (ValidTransaction, Self::Val, ::RuntimeOrigin), + TransactionValidityError, + > { log::trace!(target: LOG_TARGET, "validate"); - match call { + let v = match call { RuntimeCall::SubstrateTest(ref substrate_test_call) => - substrate_test_pallet::validate_runtime_call(substrate_test_call), - _ => Ok(Default::default()), - } - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &sp_runtime::traits::DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(drop) + substrate_test_pallet::validate_runtime_call(substrate_test_call)?, + _ => Default::default(), + }; + Ok((v, (), origin)) } } @@ -364,6 +362,7 @@ impl frame_system::pallet::Config for Runtime { type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type ExtensionsWeightInfo = (); type SS58Prefix = (); type OnSetCode = (); type MaxConsumers = ConstU32<16>; @@ -480,9 +479,9 @@ impl_runtime_apis! { Executive::execute_block(block); } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> ExtrinsicInclusionMode { log::trace!(target: LOG_TARGET, "initialize_block: {header:#?}"); - Executive::initialize_block(header); + Executive::initialize_block(header) } } @@ -672,7 +671,7 @@ impl_runtime_apis! 
{ impl sp_offchain::OffchainWorkerApi for Runtime { fn offchain_worker(header: &::Header) { - let ext = Extrinsic::new_unsigned( + let ext = Extrinsic::new_bare( substrate_test_pallet::pallet::Call::storage_change{ key:b"some_key".encode(), value:Some(header.number.encode()) @@ -1025,7 +1024,7 @@ mod tests { use sp_core::{storage::well_known_keys::HEAP_PAGES, traits::CallContext}; use sp_keyring::AccountKeyring; use sp_runtime::{ - traits::{Hash as _, SignedExtension}, + traits::{DispatchTransaction, Hash as _}, transaction_validity::{InvalidTransaction, ValidTransaction}, }; use substrate_test_runtime_client::{ @@ -1174,31 +1173,33 @@ mod tests { fn check_substrate_check_signed_extension_works() { sp_tracing::try_init_simple(); new_test_ext().execute_with(|| { - let x = sp_keyring::AccountKeyring::Alice.into(); + let x: AccountId = sp_keyring::AccountKeyring::Alice.into(); let info = DispatchInfo::default(); let len = 0_usize; assert_eq!( CheckSubstrateCall {} - .validate( - &x, + .validate_only( + Some(x).into(), &ExtrinsicBuilder::new_call_with_priority(16).build().function, &info, - len + len, ) .unwrap() + .0 .priority, 16 ); assert_eq!( CheckSubstrateCall {} - .validate( - &x, + .validate_only( + Some(x).into(), &ExtrinsicBuilder::new_call_do_not_propagate().build().function, &info, - len + len, ) .unwrap() + .0 .propagate, false ); diff --git a/substrate/test-utils/runtime/transaction-pool/src/lib.rs b/substrate/test-utils/runtime/transaction-pool/src/lib.rs index 8c8345b06bd32e4efbe2f721ddebe2cc2f2f9c4e..5202e6e65154d62085a0656cc780cc39452e0ed9 100644 --- a/substrate/test-utils/runtime/transaction-pool/src/lib.rs +++ b/substrate/test-utils/runtime/transaction-pool/src/lib.rs @@ -81,6 +81,7 @@ pub struct ChainState { pub block_by_hash: HashMap, pub nonces: HashMap, pub invalid_hashes: HashSet, + pub priorities: HashMap, } /// Test Api for transaction pool. @@ -214,6 +215,22 @@ impl TestApi { self.chain.write().invalid_hashes.insert(Self::hash_and_length_inner(xts).0); } + /// Remove a transaction that was previously declared as invalid via [`Self::add_invalid`]. + /// + /// The next time the transaction pool tries to validate this + /// extrinsic, the API will report it as valid. + pub fn remove_invalid(&self, xts: &Extrinsic) { + self.chain.write().invalid_hashes.remove(&Self::hash_and_length_inner(xts).0); + } + + /// Set a transaction priority. + pub fn set_priority(&self, xts: &Extrinsic, priority: u64) { + self.chain + .write() + .priorities + .insert(Self::hash_and_length_inner(xts).0, priority); + } + + /// Query validation requests received.
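The two new helpers are intended to be used alongside the existing `add_invalid` when scripting pool behaviour in tests. A hedged usage sketch (the wrapper function is invented for illustration; `TestApi` and `Extrinsic` are the types defined in this crate):

```rust
fn exercise_priority_and_invalidity(api: &TestApi, xt: &Extrinsic) {
	// Priorities default to 1 (see `validate_transaction` below); override per extrinsic.
	api.set_priority(xt, 10);

	// Make the extrinsic fail validation for a while, then let it pass again.
	api.add_invalid(xt);
	// ... assert that the pool now rejects `xt` ...
	api.remove_invalid(xt);
	// ... and that a later revalidation of `xt` succeeds again.
}
```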
pub fn validation_requests(&self) -> Vec { self.validation_requests.read().clone() @@ -300,8 +317,14 @@ impl ChainApi for TestApi { return ready(Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(0))))) } - let mut validity = - ValidTransaction { priority: 1, requires, provides, longevity: 64, propagate: true }; + let priority = self.chain.read().priorities.get(&self.hash_and_length(&uxt).0).cloned(); + let mut validity = ValidTransaction { + priority: priority.unwrap_or(1), + requires, + provides, + longevity: 64, + propagate: true, + }; (self.valid_modifier.read())(&mut validity); diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs index d5dc6226ab9f0fbd9611e5d44395ed36c1f3c803..c6ba282401672335a6a72e698533a4af35670523 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -298,16 +298,23 @@ impl PalletCmd { // Convert `Vec` to `String` for better readability. let benchmarks_to_run: Vec<_> = benchmarks_to_run .into_iter() - .map(|b| { + .map(|(pallet, extrinsic, components, pov_modes)| { + let pallet_name = + String::from_utf8(pallet.clone()).expect("Encoded from String; qed"); + let extrinsic_name = + String::from_utf8(extrinsic.clone()).expect("Encoded from String; qed"); ( - b.0, - b.1, - b.2, - b.3.into_iter() + pallet, + extrinsic, + components, + pov_modes + .into_iter() .map(|(p, s)| { (String::from_utf8(p).unwrap(), String::from_utf8(s).unwrap()) }) .collect(), + pallet_name, + extrinsic_name, ) }) .collect(); @@ -329,12 +336,12 @@ impl PalletCmd { let mut component_ranges = HashMap::<(Vec, Vec), Vec>::new(); let pov_modes = Self::parse_pov_modes(&benchmarks_to_run)?; - for (pallet, extrinsic, components, _) in benchmarks_to_run.clone() { + for (pallet, extrinsic, components, _, pallet_name, extrinsic_name) in + benchmarks_to_run.clone() + { log::info!( target: LOG_TARGET, - "Starting benchmark: {}::{}", - String::from_utf8(pallet.clone()).expect("Encoded from String; qed"), - String::from_utf8(extrinsic.clone()).expect("Encoded from String; qed"), + "Starting benchmark: {pallet_name}::{extrinsic_name}" ); let all_components = if components.is_empty() { vec![Default::default()] @@ -415,12 +422,7 @@ impl PalletCmd { ) .map_err(|e| format!("Failed to decode benchmark results: {:?}", e))? .map_err(|e| { - format!( - "Benchmark {}::{} failed: {}", - String::from_utf8_lossy(&pallet), - String::from_utf8_lossy(&extrinsic), - e - ) + format!("Benchmark {pallet_name}::{extrinsic_name} failed: {e}",) })?; } // Do one loop of DB tracking. @@ -494,11 +496,7 @@ impl PalletCmd { log::info!( target: LOG_TARGET, - "Running benchmark: {}.{}({} args) {}/{} {}/{}", - String::from_utf8(pallet.clone()) - .expect("Encoded from String; qed"), - String::from_utf8(extrinsic.clone()) - .expect("Encoded from String; qed"), + "Running benchmark: {pallet_name}::{extrinsic_name}({} args) {}/{} {}/{}", components.len(), s + 1, // s starts at 0. 
all_components.len(), @@ -707,12 +705,14 @@ impl PalletCmd { Vec, Vec<(BenchmarkParameter, u32, u32)>, Vec<(String, String)>, + String, + String, )>, ) -> Result { use std::collections::hash_map::Entry; let mut parsed = PovModesMap::new(); - for (pallet, call, _components, pov_modes) in benchmarks { + for (pallet, call, _components, pov_modes, _, _) in benchmarks { for (pallet_storage, mode) in pov_modes { let mode = PovEstimationMode::from_str(&mode)?; let splits = pallet_storage.split("::").collect::>(); @@ -766,6 +766,8 @@ fn list_benchmark( Vec, Vec<(BenchmarkParameter, u32, u32)>, Vec<(String, String)>, + String, + String, )>, list_output: ListOutput, no_csv_header: bool, @@ -773,11 +775,11 @@ fn list_benchmark( let mut benchmarks = BTreeMap::new(); // Sort and de-dub by pallet and function name. - benchmarks_to_run.iter().for_each(|(pallet, extrinsic, _, _)| { + benchmarks_to_run.iter().for_each(|(_, _, _, _, pallet_name, extrinsic_name)| { benchmarks - .entry(String::from_utf8_lossy(pallet).to_string()) + .entry(pallet_name) .or_insert_with(BTreeSet::new) - .insert(String::from_utf8_lossy(extrinsic).to_string()); + .insert(extrinsic_name); }); match list_output { diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/template.hbs b/substrate/utils/frame/benchmarking-cli/src/pallet/template.hbs index 1e5e294acba263aa4f3abf97e4249ce5917d9d42..a044049a0d614e42530f30badee173a63a28f0f5 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/template.hbs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/template.hbs @@ -22,7 +22,11 @@ use core::marker::PhantomData; /// Weight functions for `{{pallet}}`. pub struct WeightInfo(PhantomData); +{{#if (eq pallet "frame_system_extensions")}} +impl frame_system::ExtensionsWeightInfo for WeightInfo { +{{else}} impl {{pallet}}::WeightInfo for WeightInfo { +{{/if}} {{#each benchmarks as |benchmark|}} {{#each benchmark.comments as |comment|}} /// {{comment}} diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs index 9493a693bbed321785bf0d23326d0f68b3efd534..bd4b65d8a2e378d543b54bebd8db4a1c3d378f98 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs @@ -153,8 +153,8 @@ fn map_results( continue } - let pallet_string = String::from_utf8(batch.pallet.clone()).unwrap(); - let instance_string = String::from_utf8(batch.instance.clone()).unwrap(); + let pallet_name = String::from_utf8(batch.pallet.clone()).unwrap(); + let instance_name = String::from_utf8(batch.instance.clone()).unwrap(); let benchmark_data = get_benchmark_data( batch, storage_info, @@ -166,7 +166,7 @@ fn map_results( worst_case_map_values, additional_trie_layers, ); - let pallet_benchmarks = all_benchmarks.entry((pallet_string, instance_string)).or_default(); + let pallet_benchmarks = all_benchmarks.entry((pallet_name, instance_name)).or_default(); pallet_benchmarks.push(benchmark_data); } Ok(all_benchmarks) @@ -571,19 +571,22 @@ pub(crate) fn process_storage_results( let mut prefix_result = result.clone(); let key_info = storage_info_map.get(&prefix); + let pallet_name = match key_info { + Some(k) => String::from_utf8(k.pallet_name.clone()).expect("encoded from string"), + None => "".to_string(), + }; + let storage_name = match key_info { + Some(k) => String::from_utf8(k.storage_name.clone()).expect("encoded from string"), + None => "".to_string(), + }; let max_size = key_info.and_then(|k| k.max_size); 
let override_pov_mode = match key_info { - Some(StorageInfo { pallet_name, storage_name, .. }) => { - let pallet_name = - String::from_utf8(pallet_name.clone()).expect("encoded from string"); - let storage_name = - String::from_utf8(storage_name.clone()).expect("encoded from string"); - + Some(_) => { // Is there an override for the storage key? - pov_modes.get(&(pallet_name.clone(), storage_name)).or( + pov_modes.get(&(pallet_name.clone(), storage_name.clone())).or( // .. or for the storage prefix? - pov_modes.get(&(pallet_name, "ALL".to_string())).or( + pov_modes.get(&(pallet_name.clone(), "ALL".to_string())).or( // .. or for the benchmark? pov_modes.get(&("ALL".to_string(), "ALL".to_string())), ), @@ -662,15 +665,10 @@ pub(crate) fn process_storage_results( // writes. if !is_prefix_identified { match key_info { - Some(key_info) => { + Some(_) => { let comment = format!( "Storage: `{}::{}` (r:{} w:{})", - String::from_utf8(key_info.pallet_name.clone()) - .expect("encoded from string"), - String::from_utf8(key_info.storage_name.clone()) - .expect("encoded from string"), - reads, - writes, + pallet_name, storage_name, reads, writes, ); comments.push(comment) }, @@ -698,11 +696,7 @@ pub(crate) fn process_storage_results( ) { Some(new_pov) => { let comment = format!( - "Proof: `{}::{}` (`max_values`: {:?}, `max_size`: {:?}, added: {}, mode: `{:?}`)", - String::from_utf8(key_info.pallet_name.clone()) - .expect("encoded from string"), - String::from_utf8(key_info.storage_name.clone()) - .expect("encoded from string"), + "Proof: `{pallet_name}::{storage_name}` (`max_values`: {:?}, `max_size`: {:?}, added: {}, mode: `{:?}`)", key_info.max_values, key_info.max_size, new_pov, @@ -711,13 +705,9 @@ pub(crate) fn process_storage_results( comments.push(comment) }, None => { - let pallet = String::from_utf8(key_info.pallet_name.clone()) - .expect("encoded from string"); - let item = String::from_utf8(key_info.storage_name.clone()) - .expect("encoded from string"); let comment = format!( "Proof: `{}::{}` (`max_values`: {:?}, `max_size`: {:?}, mode: `{:?}`)", - pallet, item, key_info.max_values, key_info.max_size, + pallet_name, storage_name, key_info.max_values, key_info.max_size, used_pov_mode, ); comments.push(comment); diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs index c7399468da9da7e9e0d692ec659db3076a2d5a79..d49479d9cda955770dc32df4800c7bef70082816 100644 --- a/substrate/utils/frame/remote-externalities/src/lib.rs +++ b/substrate/utils/frame/remote-externalities/src/lib.rs @@ -1204,8 +1204,9 @@ where #[cfg(test)] mod test_prelude { pub(crate) use super::*; - pub(crate) use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, H256 as Hash}; - pub(crate) type Block = RawBlock>; + pub(crate) use sp_runtime::testing::{Block as RawBlock, MockCallU64}; + pub(crate) type UncheckedXt = sp_runtime::generic::UncheckedExtrinsic; + pub(crate) type Block = RawBlock; pub(crate) fn init_logger() { sp_tracing::try_init_simple(); diff --git a/substrate/utils/frame/rpc/client/src/lib.rs b/substrate/utils/frame/rpc/client/src/lib.rs index 221f260b156699f012e17d5eecd8c311066d2902..4f4f4bbef56213f88ecfa6fe848e178d448d5560 100644 --- a/substrate/utils/frame/rpc/client/src/lib.rs +++ b/substrate/utils/frame/rpc/client/src/lib.rs @@ -199,11 +199,15 @@ where #[cfg(test)] mod tests { use super::*; - use sp_runtime::testing::{Block as TBlock, ExtrinsicWrapper, Header, H256}; + use sp_runtime::{ + generic::UncheckedExtrinsic, + testing::{Block 
as TBlock, Header, MockCallU64, H256}, + }; use std::sync::Arc; use tokio::sync::Mutex; - type Block = TBlock>; + type UncheckedXt = UncheckedExtrinsic; + type Block = TBlock; type BlockNumber = u64; type Hash = H256; diff --git a/substrate/utils/wasm-builder/src/prerequisites.rs b/substrate/utils/wasm-builder/src/prerequisites.rs index a601e3210dd0c2b89beffa1d5d6b788cb35fd6a3..22caf8950637960310c62eb98c5358de55c1662d 100644 --- a/substrate/utils/wasm-builder/src/prerequisites.rs +++ b/substrate/utils/wasm-builder/src/prerequisites.rs @@ -149,6 +149,14 @@ impl<'a> DummyCrate<'a> { sysroot_cmd.output().ok().and_then(|o| String::from_utf8(o.stdout).ok()) } + fn get_toolchain(&self) -> Option { + let sysroot = self.get_sysroot()?; + Path::new(sysroot.trim()) + .file_name() + .and_then(|s| s.to_str()) + .map(|s| s.to_string()) + } + fn try_build(&self) -> Result<(), Option> { let Ok(result) = self.prepare_command("build").output() else { return Err(None) }; if !result.status.success() { @@ -164,14 +172,15 @@ fn check_wasm_toolchain_installed( let dummy_crate = DummyCrate::new(&cargo_command, RuntimeTarget::Wasm); if let Err(error) = dummy_crate.try_build() { + let toolchain = dummy_crate.get_toolchain().unwrap_or("".to_string()); let basic_error_message = colorize_error_message( - "Rust WASM toolchain is not properly installed; please install it!", + &format!("Rust WASM target for toolchain {toolchain} is not properly installed; please install it!") ); return match error { None => Err(basic_error_message), Some(error) if error.contains("the `wasm32-unknown-unknown` target may not be installed") => { - Err(colorize_error_message("Cannot compile the WASM runtime: the `wasm32-unknown-unknown` target is not installed!\n\ - You can install it with `rustup target add wasm32-unknown-unknown` if you're using `rustup`.")) + Err(colorize_error_message(&format!("Cannot compile the WASM runtime: the `wasm32-unknown-unknown` target is not installed!\n\ + You can install it with `rustup target add wasm32-unknown-unknown --toolchain {toolchain}` if you're using `rustup`."))) }, // Apparently this can happen when we're running on a non Tier 1 platform. 
Some(ref error) if error.contains("linker `rust-lld` not found") => @@ -193,9 +202,10 @@ fn check_wasm_toolchain_installed( let src_path = Path::new(sysroot.trim()).join("lib").join("rustlib").join("src").join("rust"); if !src_path.exists() { + let toolchain = dummy_crate.get_toolchain().unwrap_or("".to_string()); return Err(colorize_error_message( - "Cannot compile the WASM runtime: no standard library sources found!\n\ - You can install them with `rustup component add rust-src` if you're using `rustup`.", + &format!("Cannot compile the WASM runtime: no standard library sources found at {}!\n\ + You can install them with `rustup component add rust-src --toolchain {toolchain}` if you're using `rustup`.", src_path.display()), )) } } diff --git a/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.zndsl b/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.zndsl index ea0f15e5d8ace1315e231c6db2e01399e0ce44f3..b68bce508c008396d699e67e08b98cdf74ba5138 100644 --- a/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.zndsl +++ b/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.zndsl @@ -34,8 +34,8 @@ bob: reports block height is at least {{DB_BLOCK_HEIGHT}} within 10 seconds alice: reports substrate_beefy_best_block is at least {{DB_BLOCK_HEIGHT}} within 180 seconds bob: reports substrate_beefy_best_block is at least {{DB_BLOCK_HEIGHT}} within 180 seconds -alice: reports substrate_beefy_best_block is greater than {{DB_BLOCK_HEIGHT}} within 60 seconds -bob: reports substrate_beefy_best_block is greater than {{DB_BLOCK_HEIGHT}} within 60 seconds +alice: reports substrate_beefy_best_block is greater than {{DB_BLOCK_HEIGHT}} within 180 seconds +bob: reports substrate_beefy_best_block is greater than {{DB_BLOCK_HEIGHT}} within 180 seconds alice: count of log lines containing "error" is 0 within 10 seconds bob: count of log lines containing "verification failed" is 0 within 10 seconds diff --git a/templates/minimal/README.md b/templates/minimal/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/templates/minimal/node/Cargo.toml b/templates/minimal/node/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..410cfc9f6c34844a2c6edb5390c6e16c50cc4406 --- /dev/null +++ b/templates/minimal/node/Cargo.toml @@ -0,0 +1,62 @@ +[package] +name = "minimal-template-node" +description = "A minimal Substrate-based node, ready for hacking."
+version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +edition.workspace = true +publish = false +build = "build.rs" + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +clap = { version = "4.5.1", features = ["derive"] } +futures = { version = "0.3.21", features = ["thread-pool"] } +futures-timer = "3.0.1" +jsonrpsee = { version = "0.22", features = ["server"] } +serde_json = { workspace = true, default-features = true } + +sc-cli = { path = "../../../substrate/client/cli" } +sc-executor = { path = "../../../substrate/client/executor" } +sc-network = { path = "../../../substrate/client/network" } +sc-service = { path = "../../../substrate/client/service" } +sc-telemetry = { path = "../../../substrate/client/telemetry" } +sc-transaction-pool = { path = "../../../substrate/client/transaction-pool" } +sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" } +sc-consensus = { path = "../../../substrate/client/consensus/common" } +sc-consensus-manual-seal = { path = "../../../substrate/client/consensus/manual-seal" } +sc-rpc-api = { path = "../../../substrate/client/rpc-api" } +sc-basic-authorship = { path = "../../../substrate/client/basic-authorship" } +sc-offchain = { path = "../../../substrate/client/offchain" } +sc-client-api = { path = "../../../substrate/client/api" } + +sp-timestamp = { path = "../../../substrate/primitives/timestamp" } +sp-keyring = { path = "../../../substrate/primitives/keyring" } +sp-api = { path = "../../../substrate/primitives/api" } +sp-blockchain = { path = "../../../substrate/primitives/blockchain" } +sp-block-builder = { path = "../../../substrate/primitives/block-builder" } +sp-io = { path = "../../../substrate/primitives/io" } +sp-runtime = { path = "../../../substrate/primitives/runtime" } + +substrate-frame-rpc-system = { path = "../../../substrate/utils/frame/rpc/system" } + +# Once the native runtime is gone, there should be little to no dependency on FRAME here, and +# certainly no dependency on the runtime. 
+frame = { path = "../../../substrate/frame", features = [ + "experimental", + "runtime", +] } +runtime = { package = "minimal-template-runtime", path = "../runtime" } + +[build-dependencies] +substrate-build-script-utils = { path = "../../../substrate/utils/build-script-utils" } + +[features] +default = [] diff --git a/substrate/bin/minimal/node/build.rs b/templates/minimal/node/build.rs similarity index 100% rename from substrate/bin/minimal/node/build.rs rename to templates/minimal/node/build.rs diff --git a/substrate/bin/minimal/node/src/chain_spec.rs b/templates/minimal/node/src/chain_spec.rs similarity index 100% rename from substrate/bin/minimal/node/src/chain_spec.rs rename to templates/minimal/node/src/chain_spec.rs diff --git a/substrate/bin/minimal/node/src/cli.rs b/templates/minimal/node/src/cli.rs similarity index 100% rename from substrate/bin/minimal/node/src/cli.rs rename to templates/minimal/node/src/cli.rs diff --git a/substrate/bin/minimal/node/src/command.rs b/templates/minimal/node/src/command.rs similarity index 100% rename from substrate/bin/minimal/node/src/command.rs rename to templates/minimal/node/src/command.rs diff --git a/substrate/bin/minimal/node/src/lib.rs b/templates/minimal/node/src/lib.rs similarity index 94% rename from substrate/bin/minimal/node/src/lib.rs rename to templates/minimal/node/src/lib.rs index c2065def736aeeb3b1102bade04063532128f3cd..cb8ed3bd209dd24d1bfa9d53b904d411acbea02e 100644 --- a/substrate/bin/minimal/node/src/lib.rs +++ b/templates/minimal/node/src/lib.rs @@ -1,4 +1,4 @@ -// This file is part of Substrate. +// This file is part of Polkadot Sdk. // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 diff --git a/substrate/bin/minimal/node/src/main.rs b/templates/minimal/node/src/main.rs similarity index 100% rename from substrate/bin/minimal/node/src/main.rs rename to templates/minimal/node/src/main.rs diff --git a/substrate/bin/minimal/node/src/rpc.rs b/templates/minimal/node/src/rpc.rs similarity index 100% rename from substrate/bin/minimal/node/src/rpc.rs rename to templates/minimal/node/src/rpc.rs diff --git a/substrate/bin/minimal/node/src/service.rs b/templates/minimal/node/src/service.rs similarity index 100% rename from substrate/bin/minimal/node/src/service.rs rename to templates/minimal/node/src/service.rs diff --git a/templates/minimal/pallets/template/Cargo.toml b/templates/minimal/pallets/template/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..9982e5ea53bc3eff28fb0331cfb14ad41a15eec7 --- /dev/null +++ b/templates/minimal/pallets/template/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "pallet-minimal-template" +description = "A minimal pallet built with FRAME, part of Polkadot Sdk." 
+version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +edition.workspace = true +publish = false + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", features = [ + "derive", +], default-features = false } +scale-info = { version = "2.10.0", default-features = false, features = [ + "derive", +] } +frame = { path = "../../../../substrate/frame", default-features = false, features = [ + "experimental", + "runtime", +] } + + +[features] +default = ["std"] +std = ["codec/std", "frame/std", "scale-info/std"] diff --git a/templates/minimal/pallets/template/src/lib.rs b/templates/minimal/pallets/template/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..713f014bbe61fc9fa7df5019c00afddebf02bbc6 --- /dev/null +++ b/templates/minimal/pallets/template/src/lib.rs @@ -0,0 +1,19 @@ +//! A shell pallet built with [`frame`]. + +#![cfg_attr(not(feature = "std"), no_std)] + +use frame::prelude::*; + +// Re-export all pallet parts, this is needed to properly import the pallet into the runtime. +pub use pallet::*; + +#[frame::pallet] +pub mod pallet { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); +} diff --git a/templates/minimal/runtime/Cargo.toml b/templates/minimal/runtime/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..20ffb706eb4992b71a7b34d638b7c4cdc70237fd --- /dev/null +++ b/templates/minimal/runtime/Cargo.toml @@ -0,0 +1,59 @@ +[package] +name = "minimal-template-runtime" +description = "A solochain runtime template built with Substrate, part of Polkadot Sdk." +version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +edition.workspace = true +publish = false + +[lints] +workspace = true + +[dependencies] +parity-scale-codec = { version = "3.0.0", default-features = false } +scale-info = { version = "2.6.0", default-features = false } + +# this is a frame-based runtime, thus importing `frame` with runtime feature enabled. 
+frame = { path = "../../../substrate/frame", default-features = false, features = [ + "experimental", + "runtime", +] } + +# pallets that we want to use +pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } +pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } +pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } + +# genesis builder that allows us to interact with the runtime genesis config +sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } + +# local pallet templates +pallet-minimal-template = { path = "../pallets/template", default-features = false } + +[build-dependencies] +substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } + +[features] +default = ["std"] +std = [ + "parity-scale-codec/std", + "scale-info/std", + + "frame/std", + + "pallet-balances/std", + "pallet-sudo/std", + "pallet-timestamp/std", + "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-transaction-payment/std", + + "pallet-minimal-template/std", + + "sp-genesis-builder/std", + "substrate-wasm-builder", +] diff --git a/substrate/bin/minimal/runtime/build.rs b/templates/minimal/runtime/build.rs similarity index 100% rename from substrate/bin/minimal/runtime/build.rs rename to templates/minimal/runtime/build.rs diff --git a/substrate/bin/minimal/runtime/src/lib.rs b/templates/minimal/runtime/src/lib.rs similarity index 92% rename from substrate/bin/minimal/runtime/src/lib.rs rename to templates/minimal/runtime/src/lib.rs index 610289693d91b778295b3e48569fc887f805a204..6920511e2765cd8c62f9049b5d06e960885b371f 100644 --- a/substrate/bin/minimal/runtime/src/lib.rs +++ b/templates/minimal/runtime/src/lib.rs @@ -22,21 +22,24 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); use frame::{ - deps::frame_support::weights::{FixedFee, NoFee}, + deps::frame_support::{ + genesis_builder_helper::{build_config, create_default_config}, + weights::{FixedFee, NoFee}, + }, prelude::*, runtime::{ apis::{ - self, impl_runtime_apis, ApplyExtrinsicResult, CheckInherentsResult, OpaqueMetadata, + self, impl_runtime_apis, ApplyExtrinsicResult, CheckInherentsResult, + ExtrinsicInclusionMode, OpaqueMetadata, }, prelude::*, }, }; -use frame_support::genesis_builder_helper::{build_config, create_default_config}; #[runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("minimal-runtime"), - impl_name: create_runtime_str!("minimal-runtime"), + spec_name: create_runtime_str!("minimal-template-runtime"), + impl_name: create_runtime_str!("minimal-template-runtime"), authoring_version: 1, spec_version: 0, impl_version: 1, @@ -51,7 +54,7 @@ pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } -type SignedExtra = ( +type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -70,6 +73,9 @@ construct_runtime!( Balances: pallet_balances, Sudo: pallet_sudo, TransactionPayment: pallet_transaction_payment, + + // our local pallet + Template: pallet_minimal_template, } ); @@ -103,7 +109,9 @@ impl
pallet_transaction_payment::Config for Runtime { type LengthToFee = FixedFee<1, ::Balance>; } -type Block = frame::runtime::types_common::BlockOf; +impl pallet_minimal_template::Config for Runtime {} + +type Block = frame::runtime::types_common::BlockOf; type Header = HeaderFor; type RuntimeExecutive = @@ -121,7 +129,7 @@ impl_runtime_apis! { RuntimeExecutive::execute_block(block) } - fn initialize_block(header: &Header) { + fn initialize_block(header: &Header) -> ExtrinsicInclusionMode { RuntimeExecutive::initialize_block(header) } } diff --git a/cumulus/parachain-template/LICENSE b/templates/parachain/LICENSE similarity index 100% rename from cumulus/parachain-template/LICENSE rename to templates/parachain/LICENSE diff --git a/cumulus/parachain-template/README.md b/templates/parachain/README.md similarity index 100% rename from cumulus/parachain-template/README.md rename to templates/parachain/README.md diff --git a/cumulus/parachain-template/node/Cargo.toml b/templates/parachain/node/Cargo.toml similarity index 78% rename from cumulus/parachain-template/node/Cargo.toml rename to templates/parachain/node/Cargo.toml index 0ef678c4cbaeafc7a603748aec54ce2129a95f54..b4f3a504a4d9d7aebef6262d5677e6ff9967b8c2 100644 --- a/cumulus/parachain-template/node/Cargo.toml +++ b/templates/parachain/node/Cargo.toml @@ -1,18 +1,21 @@ [package] name = "parachain-template-node" -version = "0.1.0" -authors = ["Anonymous"] -description = "A new Cumulus FRAME-based Substrate Node, ready for hacking together a parachain." -license = "Unlicense" -homepage = "https://substrate.io" +description = "A parachain node template built with Substrate and Cumulus, part of Polkadot Sdk." +version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true repository.workspace = true edition.workspace = true -build = "build.rs" publish = false +build = "build.rs" [lints] workspace = true +# [[bin]] +# name = "parachain-template-node" + [dependencies] clap = { version = "4.5.1", features = ["derive"] } log = { workspace = true, default-features = true } @@ -63,15 +66,15 @@ polkadot-primitives = { path = "../../../polkadot/primitives" } xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } # Cumulus -cumulus-client-cli = { path = "../../client/cli" } -cumulus-client-collator = { path = "../../client/collator" } -cumulus-client-consensus-aura = { path = "../../client/consensus/aura" } -cumulus-client-consensus-common = { path = "../../client/consensus/common" } -cumulus-client-consensus-proposer = { path = "../../client/consensus/proposer" } -cumulus-client-service = { path = "../../client/service" } -cumulus-primitives-core = { path = "../../primitives/core" } -cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent" } -cumulus-relay-chain-interface = { path = "../../client/relay-chain-interface" } +cumulus-client-cli = { path = "../../../cumulus/client/cli" } +cumulus-client-collator = { path = "../../../cumulus/client/collator" } +cumulus-client-consensus-aura = { path = "../../../cumulus/client/consensus/aura" } +cumulus-client-consensus-common = { path = "../../../cumulus/client/consensus/common" } +cumulus-client-consensus-proposer = { path = "../../../cumulus/client/consensus/proposer" } +cumulus-client-service = { path = "../../../cumulus/client/service" } +cumulus-primitives-core = { path = "../../../cumulus/primitives/core" } +cumulus-primitives-parachain-inherent = { path = "../../../cumulus/primitives/parachain-inherent" } 
+cumulus-relay-chain-interface = { path = "../../../cumulus/client/relay-chain-interface" } color-print = "0.3.4" [build-dependencies] diff --git a/cumulus/parachain-template/node/build.rs b/templates/parachain/node/build.rs similarity index 100% rename from cumulus/parachain-template/node/build.rs rename to templates/parachain/node/build.rs diff --git a/cumulus/parachain-template/node/src/chain_spec.rs b/templates/parachain/node/src/chain_spec.rs similarity index 93% rename from cumulus/parachain-template/node/src/chain_spec.rs rename to templates/parachain/node/src/chain_spec.rs index a79c78699c07102aca27ed06c73f93ccf86ed3ce..16c91865cdb4aa9ae3c5882652100f5ef56a988c 100644 --- a/cumulus/parachain-template/node/src/chain_spec.rs +++ b/templates/parachain/node/src/chain_spec.rs @@ -1,5 +1,6 @@ use cumulus_primitives_core::ParaId; -use parachain_template_runtime::{AccountId, AuraId, Signature, EXISTENTIAL_DEPOSIT}; +use parachain_template_runtime as runtime; +use runtime::{AccountId, AuraId, Signature, EXISTENTIAL_DEPOSIT}; use sc_chain_spec::{ChainSpecExtension, ChainSpecGroup}; use sc_service::ChainType; use serde::{Deserialize, Serialize}; @@ -56,8 +57,8 @@ where /// Generate the session keys from individual elements. /// /// The input must be a tuple of individual keys (a single arg for now since we have just one key). -pub fn template_session_keys(keys: AuraId) -> parachain_template_runtime::SessionKeys { - parachain_template_runtime::SessionKeys { aura: keys } +pub fn template_session_keys(keys: AuraId) -> runtime::SessionKeys { + runtime::SessionKeys { aura: keys } } pub fn development_config() -> ChainSpec { @@ -68,8 +69,7 @@ pub fn development_config() -> ChainSpec { properties.insert("ss58Format".into(), 42.into()); ChainSpec::builder( - parachain_template_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), + runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "rococo-local".into(), // You MUST set this to the correct network! @@ -120,8 +120,7 @@ pub fn local_testnet_config() -> ChainSpec { #[allow(deprecated)] ChainSpec::builder( - parachain_template_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), + runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "rococo-local".into(), // You MUST set this to the correct network! 
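For context on how `template_session_keys` is consumed: genesis construction typically maps each collator's account and Aura key through it when assembling the `session.keys` entries. A sketch under that assumption (the `invulnerables` parameter and the function name are illustrative, not part of this diff):

```rust
use runtime::{AccountId, AuraId};

fn session_key_entries(
	invulnerables: Vec<(AccountId, AuraId)>,
) -> Vec<(AccountId, AccountId, runtime::SessionKeys)> {
	invulnerables
		.into_iter()
		// (validator account, session account, keys): both accounts coincide in the template.
		.map(|(acc, aura)| (acc.clone(), acc, template_session_keys(aura)))
		.collect()
}
```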
diff --git a/cumulus/parachain-template/node/src/cli.rs b/templates/parachain/node/src/cli.rs similarity index 100% rename from cumulus/parachain-template/node/src/cli.rs rename to templates/parachain/node/src/cli.rs diff --git a/cumulus/parachain-template/node/src/command.rs b/templates/parachain/node/src/command.rs similarity index 100% rename from cumulus/parachain-template/node/src/command.rs rename to templates/parachain/node/src/command.rs diff --git a/cumulus/parachain-template/node/src/main.rs b/templates/parachain/node/src/main.rs similarity index 100% rename from cumulus/parachain-template/node/src/main.rs rename to templates/parachain/node/src/main.rs diff --git a/cumulus/parachain-template/node/src/rpc.rs b/templates/parachain/node/src/rpc.rs similarity index 100% rename from cumulus/parachain-template/node/src/rpc.rs rename to templates/parachain/node/src/rpc.rs diff --git a/cumulus/parachain-template/node/src/service.rs b/templates/parachain/node/src/service.rs similarity index 100% rename from cumulus/parachain-template/node/src/service.rs rename to templates/parachain/node/src/service.rs diff --git a/cumulus/parachain-template/pallets/template/Cargo.toml b/templates/parachain/pallets/template/Cargo.toml similarity index 68% rename from cumulus/parachain-template/pallets/template/Cargo.toml rename to templates/parachain/pallets/template/Cargo.toml index 04858a161fa43356e0082d99769fabd850be26a8..89eb9d5171630459e2e313ad94cee53e97921cac 100644 --- a/cumulus/parachain-template/pallets/template/Cargo.toml +++ b/templates/parachain/pallets/template/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-parachain-template" -authors = ["Anonymous"] description = "FRAME pallet template for defining custom runtime logic." -version = "0.7.0" -license = "Unlicense" -homepage = "https://substrate.io" +version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true repository.workspace = true edition.workspace = true +publish = false [lints] workspace = true @@ -15,21 +16,22 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"], default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ + "derive", +] } +scale-info = { version = "2.10.0", default-features = false, features = [ + "derive", +] } -# Substrate +# frame deps frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-support = { path = "../../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../../substrate/frame/system", default-features = false } [dev-dependencies] -serde = { workspace = true, default-features = true } - -# Substrate -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } -sp-io = { path = "../../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } +sp-core = { path = "../../../../substrate/primitives/core" } +sp-io = { path = "../../../../substrate/primitives/io" } +sp-runtime = { path = "../../../../substrate/primitives/runtime" } [features] default = ["std"] @@ -41,7 +43,7 @@ runtime-benchmarks = [ ] std = [ "codec/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", 
"frame-support/std", "frame-system/std", "scale-info/std", diff --git a/substrate/bin/node-template/pallets/template/README.md b/templates/parachain/pallets/template/README.md similarity index 100% rename from substrate/bin/node-template/pallets/template/README.md rename to templates/parachain/pallets/template/README.md diff --git a/substrate/bin/node-template/pallets/template/src/benchmarking.rs b/templates/parachain/pallets/template/src/benchmarking.rs similarity index 100% rename from substrate/bin/node-template/pallets/template/src/benchmarking.rs rename to templates/parachain/pallets/template/src/benchmarking.rs diff --git a/cumulus/parachain-template/pallets/template/src/lib.rs b/templates/parachain/pallets/template/src/lib.rs similarity index 96% rename from cumulus/parachain-template/pallets/template/src/lib.rs rename to templates/parachain/pallets/template/src/lib.rs index 24226d6cf4068d44820875540d8d209930860ca3..11587d1df426f485139eab9e333b292768c6c571 100644 --- a/cumulus/parachain-template/pallets/template/src/lib.rs +++ b/templates/parachain/pallets/template/src/lib.rs @@ -11,6 +11,8 @@ mod mock; #[cfg(test)] mod tests; +pub mod weights; + #[cfg(feature = "runtime-benchmarks")] mod benchmarking; @@ -24,6 +26,8 @@ pub mod pallet { pub trait Config: frame_system::Config { /// Because this pallet emits events, it depends on the runtime's definition of an event. type RuntimeEvent: From> + IsType<::RuntimeEvent>; + /// A type representing the weights required by the dispatchables of this pallet. + type WeightInfo: crate::weights::WeightInfo; } #[pallet::pallet] diff --git a/cumulus/parachain-template/pallets/template/src/mock.rs b/templates/parachain/pallets/template/src/mock.rs similarity index 98% rename from cumulus/parachain-template/pallets/template/src/mock.rs rename to templates/parachain/pallets/template/src/mock.rs index 411a16b116c8f94757c29a686022842e159e6924..f510a8b773acf398ef2442254de25e75f867fcbb 100644 --- a/cumulus/parachain-template/pallets/template/src/mock.rs +++ b/templates/parachain/pallets/template/src/mock.rs @@ -51,6 +51,7 @@ impl system::Config for Test { impl crate::Config for Test { type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); } // Build genesis storage according to the mock runtime. 
diff --git a/cumulus/parachain-template/pallets/template/src/tests.rs b/templates/parachain/pallets/template/src/tests.rs similarity index 100% rename from cumulus/parachain-template/pallets/template/src/tests.rs rename to templates/parachain/pallets/template/src/tests.rs diff --git a/substrate/bin/node-template/pallets/template/src/weights.rs b/templates/parachain/pallets/template/src/weights.rs similarity index 100% rename from substrate/bin/node-template/pallets/template/src/weights.rs rename to templates/parachain/pallets/template/src/weights.rs diff --git a/cumulus/parachain-template/polkadot-launch/config.json b/templates/parachain/polkadot-launch/config.json similarity index 100% rename from cumulus/parachain-template/polkadot-launch/config.json rename to templates/parachain/polkadot-launch/config.json diff --git a/cumulus/parachain-template/runtime/Cargo.toml b/templates/parachain/runtime/Cargo.toml similarity index 81% rename from cumulus/parachain-template/runtime/Cargo.toml rename to templates/parachain/runtime/Cargo.toml index 1873bd0a23ebdd55a4451a9f3936875a2935001c..44e9341b568af1f4dd039f275121b5baa535c820 100644 --- a/cumulus/parachain-template/runtime/Cargo.toml +++ b/templates/parachain/runtime/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "parachain-template-runtime" -version = "0.7.0" -authors = ["Anonymous"] -description = "A new Cumulus FRAME-based Substrate Runtime, ready for hacking together a parachain." -license = "Unlicense" -homepage = "https://substrate.io" +description = "A parachain runtime template built with Substrate and Cumulus, part of Polkadot Sdk." +version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true repository.workspace = true edition.workspace = true +publish = false [lints] workspace = true @@ -18,16 +19,20 @@ targets = ["x86_64-unknown-linux-gnu"] substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ + "derive", +] } hex-literal = { version = "0.4.1", optional = true } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = [ + "derive", +] } smallvec = "1.11.0" # Local pallet-parachain-template = { path = "../pallets/template", default-features = false } -# Substrate +# Substrate / FRAME frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-executive = { path = "../../../substrate/frame/executive", default-features = false } frame-support = { path = "../../../substrate/frame/support", default-features = false } @@ -35,6 +40,8 @@ frame-system = { path = "../../../substrate/frame/system", default-features = fa frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true } frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true } + +# FRAME Pallets pallet-aura = { path = "../../../substrate/frame/aura", default-features = false } pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } 
pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } @@ -44,6 +51,8 @@ pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } + +# Substrate Primitives sp-api = { path = "../../../substrate/primitives/api", default-features = false } sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false } sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false } @@ -66,17 +75,19 @@ xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/x xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus -cumulus-pallet-aura-ext = { path = "../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } -cumulus-pallet-session-benchmarking = { path = "../../pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = "../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../pallets/xcmp-queue", default-features = false } -cumulus-primitives-core = { path = "../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../primitives/utility", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../primitives/storage-weight-reclaim", default-features = false } -pallet-collator-selection = { path = "../../pallets/collator-selection", default-features = false } -parachains-common = { path = "../../parachains/common", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../parachains/pallets/parachain-info", default-features = false } +cumulus-pallet-aura-ext = { path = "../../../cumulus/pallets/aura-ext", default-features = false } +cumulus-pallet-parachain-system = { path = "../../../cumulus/pallets/parachain-system", default-features = false, features = [ + "parameterized-consensus-hook", +] } +cumulus-pallet-session-benchmarking = { path = "../../../cumulus/pallets/session-benchmarking", default-features = false } +cumulus-pallet-xcm = { path = "../../../cumulus/pallets/xcm", default-features = false } +cumulus-pallet-xcmp-queue = { path = "../../../cumulus/pallets/xcmp-queue", default-features = false } +cumulus-primitives-core = { path = "../../../cumulus/primitives/core", default-features = false } +cumulus-primitives-utility = { path = "../../../cumulus/primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../cumulus/primitives/storage-weight-reclaim", default-features = false } +pallet-collator-selection = { path = "../../../cumulus/pallets/collator-selection", default-features = false } +parachains-common = { path = "../../../cumulus/parachains/common", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../cumulus/parachains/pallets/parachain-info", default-features = false } [features] default = ["std"] @@ -138,6 
+149,7 @@ runtime-benchmarks = [ "cumulus-pallet-session-benchmarking/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", "cumulus-primitives-core/runtime-benchmarks", + "cumulus-primitives-storage-weight-reclaim/runtime-benchmarks", "cumulus-primitives-utility/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", @@ -150,6 +162,7 @@ runtime-benchmarks = [ "pallet-parachain-template/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "parachains-common/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", diff --git a/cumulus/parachain-template/runtime/build.rs b/templates/parachain/runtime/build.rs similarity index 100% rename from cumulus/parachain-template/runtime/build.rs rename to templates/parachain/runtime/build.rs diff --git a/cumulus/parachain-template/runtime/src/lib.rs b/templates/parachain/runtime/src/lib.rs similarity index 97% rename from cumulus/parachain-template/runtime/src/lib.rs rename to templates/parachain/runtime/src/lib.rs index cee9b33bf37cca9cab905223406453c5424a5894..74ecd751f672c77424cdae2d3e48c143681127ec 100644 --- a/cumulus/parachain-template/runtime/src/lib.rs +++ b/templates/parachain/runtime/src/lib.rs @@ -97,8 +97,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -112,7 +112,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< @@ -180,8 +180,8 @@ impl_opaque_keys! { #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("template-parachain"), - impl_name: create_runtime_str!("template-parachain"), + spec_name: create_runtime_str!("parachain-template-runtime"), + impl_name: create_runtime_str!("parachain-template-runtime"), authoring_version: 1, spec_version: 1, impl_version: 0, @@ -353,6 +353,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = (); } impl pallet_sudo::Config for Runtime { @@ -489,6 +490,7 @@ impl pallet_collator_selection::Config for Runtime { /// Configure the pallet template in pallets/template. impl pallet_parachain_template::Config for Runtime { type RuntimeEvent = RuntimeEvent; + type WeightInfo = pallet_parachain_template::weights::SubstrateWeight; } // Create the runtime by composing the FRAME pallets that were previously configured. @@ -529,6 +531,7 @@ construct_runtime!( mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_balances, Balances] [pallet_session, SessionBench::] [pallet_timestamp, Timestamp] @@ -560,7 +563,7 @@ impl_runtime_apis! 
{ Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -712,6 +715,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; let mut list = Vec::::new(); @@ -727,6 +731,7 @@ impl_runtime_apis! { use frame_benchmarking::{BenchmarkError, Benchmarking, BenchmarkBatch}; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); diff --git a/cumulus/parachain-template/runtime/src/weights/block_weights.rs b/templates/parachain/runtime/src/weights/block_weights.rs similarity index 100% rename from cumulus/parachain-template/runtime/src/weights/block_weights.rs rename to templates/parachain/runtime/src/weights/block_weights.rs diff --git a/cumulus/parachain-template/runtime/src/weights/extrinsic_weights.rs b/templates/parachain/runtime/src/weights/extrinsic_weights.rs similarity index 100% rename from cumulus/parachain-template/runtime/src/weights/extrinsic_weights.rs rename to templates/parachain/runtime/src/weights/extrinsic_weights.rs diff --git a/cumulus/parachain-template/runtime/src/weights/mod.rs b/templates/parachain/runtime/src/weights/mod.rs similarity index 100% rename from cumulus/parachain-template/runtime/src/weights/mod.rs rename to templates/parachain/runtime/src/weights/mod.rs diff --git a/cumulus/parachain-template/runtime/src/weights/paritydb_weights.rs b/templates/parachain/runtime/src/weights/paritydb_weights.rs similarity index 100% rename from cumulus/parachain-template/runtime/src/weights/paritydb_weights.rs rename to templates/parachain/runtime/src/weights/paritydb_weights.rs diff --git a/cumulus/parachain-template/runtime/src/weights/rocksdb_weights.rs b/templates/parachain/runtime/src/weights/rocksdb_weights.rs similarity index 100% rename from cumulus/parachain-template/runtime/src/weights/rocksdb_weights.rs rename to templates/parachain/runtime/src/weights/rocksdb_weights.rs diff --git a/cumulus/parachain-template/runtime/src/xcm_config.rs b/templates/parachain/runtime/src/xcm_config.rs similarity index 100% rename from cumulus/parachain-template/runtime/src/xcm_config.rs rename to templates/parachain/runtime/src/xcm_config.rs diff --git a/substrate/bin/node-template/LICENSE b/templates/solochain/LICENSE similarity index 100% rename from substrate/bin/node-template/LICENSE rename to templates/solochain/LICENSE diff --git a/substrate/bin/node-template/README.md b/templates/solochain/README.md similarity index 100% rename from substrate/bin/node-template/README.md rename to templates/solochain/README.md diff --git a/substrate/bin/node-template/docs/rust-setup.md b/templates/solochain/docs/rust-setup.md similarity index 100% rename from substrate/bin/node-template/docs/rust-setup.md rename to templates/solochain/docs/rust-setup.md diff --git a/substrate/bin/node-template/env-setup/README.md b/templates/solochain/env-setup/README.md similarity index 100% rename from 
substrate/bin/node-template/env-setup/README.md rename to templates/solochain/env-setup/README.md diff --git a/substrate/bin/node-template/env-setup/flake.lock b/templates/solochain/env-setup/flake.lock similarity index 100% rename from substrate/bin/node-template/env-setup/flake.lock rename to templates/solochain/env-setup/flake.lock diff --git a/substrate/bin/node-template/env-setup/flake.nix b/templates/solochain/env-setup/flake.nix similarity index 100% rename from substrate/bin/node-template/env-setup/flake.nix rename to templates/solochain/env-setup/flake.nix diff --git a/substrate/bin/node-template/env-setup/rust-toolchain.toml b/templates/solochain/env-setup/rust-toolchain.toml similarity index 100% rename from substrate/bin/node-template/env-setup/rust-toolchain.toml rename to templates/solochain/env-setup/rust-toolchain.toml diff --git a/templates/solochain/node/Cargo.toml b/templates/solochain/node/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..5e50e6106c1dc43481627faf6803cc76e59c101b --- /dev/null +++ b/templates/solochain/node/Cargo.toml @@ -0,0 +1,92 @@ +[package] +name = "solochain-template-node" +description = "A solochain node template built with Substrate, part of Polkadot Sdk." +version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +edition.workspace = true +publish = false + +build = "build.rs" + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +clap = { version = "4.5.1", features = ["derive"] } +futures = { version = "0.3.21", features = ["thread-pool"] } +serde_json = { workspace = true, default-features = true } +jsonrpsee = { version = "0.22", features = ["server"] } + +# substrate client +sc-cli = { path = "../../../substrate/client/cli" } +sp-core = { path = "../../../substrate/primitives/core" } +sc-executor = { path = "../../../substrate/client/executor" } +sc-network = { path = "../../../substrate/client/network" } +sc-service = { path = "../../../substrate/client/service" } +sc-telemetry = { path = "../../../substrate/client/telemetry" } +sc-transaction-pool = { path = "../../../substrate/client/transaction-pool" } +sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" } +sc-offchain = { path = "../../../substrate/client/offchain" } +sc-consensus-aura = { path = "../../../substrate/client/consensus/aura" } +sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura" } +sc-consensus = { path = "../../../substrate/client/consensus/common" } +sc-consensus-grandpa = { path = "../../../substrate/client/consensus/grandpa" } +sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa" } +sc-client-api = { path = "../../../substrate/client/api" } +sc-rpc-api = { path = "../../../substrate/client/rpc-api" } +sc-basic-authorship = { path = "../../../substrate/client/basic-authorship" } + +# substrate primitives +sp-runtime = { path = "../../../substrate/primitives/runtime" } +sp-io = { path = "../../../substrate/primitives/io" } +sp-timestamp = { path = "../../../substrate/primitives/timestamp" } +sp-inherents = { path = "../../../substrate/primitives/inherents" } +sp-keyring = { path = "../../../substrate/primitives/keyring" } +sp-api = { path = "../../../substrate/primitives/api" } +sp-blockchain = { path = "../../../substrate/primitives/blockchain" } +sp-block-builder = { path = "../../../substrate/primitives/block-builder" } + +# frame 
and pallets +frame-system = { path = "../../../substrate/frame/system" } +pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc = { path = "../../../substrate/frame/transaction-payment/rpc" } +substrate-frame-rpc-system = { path = "../../../substrate/utils/frame/rpc/system" } + +# These dependencies are used for runtime benchmarking +frame-benchmarking-cli = { path = "../../../substrate/utils/frame/benchmarking-cli" } + +# Local Dependencies +solochain-template-runtime = { path = "../runtime" } + +# CLI-specific dependencies +try-runtime-cli = { path = "../../../substrate/utils/frame/try-runtime/cli", optional = true } + +[build-dependencies] +substrate-build-script-utils = { path = "../../../substrate/utils/build-script-utils" } + +[features] +default = [] +# Dependencies that are only required if runtime benchmarking should be build. +runtime-benchmarks = [ + "frame-benchmarking-cli/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", + "sc-service/runtime-benchmarks", + "solochain-template-runtime/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +# Enable features that allow the runtime to be tried and debugged. Name might be subject to change +# in the near future. +try-runtime = [ + "frame-system/try-runtime", + "pallet-transaction-payment/try-runtime", + "solochain-template-runtime/try-runtime", + "sp-runtime/try-runtime", + "try-runtime-cli/try-runtime", +] diff --git a/substrate/bin/node-template/node/build.rs b/templates/solochain/node/build.rs similarity index 100% rename from substrate/bin/node-template/node/build.rs rename to templates/solochain/node/build.rs diff --git a/substrate/bin/node-template/node/src/benchmarking.rs b/templates/solochain/node/src/benchmarking.rs similarity index 97% rename from substrate/bin/node-template/node/src/benchmarking.rs rename to templates/solochain/node/src/benchmarking.rs index 6e29ad1a123118d953c1fad654bad9343fd8ca53..8710f303e1f03bd8471b38b10c68217a45225d5a 100644 --- a/substrate/bin/node-template/node/src/benchmarking.rs +++ b/templates/solochain/node/src/benchmarking.rs @@ -4,10 +4,10 @@ use crate::service::FullClient; -use node_template_runtime as runtime; use runtime::{AccountId, Balance, BalancesCall, SystemCall}; use sc_cli::Result; use sc_client_api::BlockBackend; +use solochain_template_runtime as runtime; use sp_core::{Encode, Pair}; use sp_inherents::{InherentData, InherentDataProvider}; use sp_keyring::Sr25519Keyring; @@ -109,7 +109,7 @@ pub fn create_benchmark_extrinsic( .checked_next_power_of_two() .map(|c| c / 2) .unwrap_or(2) as u64; - let extra: runtime::SignedExtra = ( + let tx_ext: runtime::TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -121,11 +121,12 @@ pub fn create_benchmark_extrinsic( frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(0), - ); + ) + .into(); let raw_payload = runtime::SignedPayload::from_raw( call.clone(), - extra.clone(), + tx_ext.clone(), ( (), runtime::VERSION.spec_version, @@ -143,7 +144,7 @@ pub fn create_benchmark_extrinsic( call, sp_runtime::AccountId32::from(sender.public()).into(), runtime::Signature::Sr25519(signature), - extra, + tx_ext, ) } diff --git a/substrate/bin/node-template/node/src/chain_spec.rs b/templates/solochain/node/src/chain_spec.rs 
similarity index 97% rename from substrate/bin/node-template/node/src/chain_spec.rs rename to templates/solochain/node/src/chain_spec.rs index 6e0d78f647a594d7e3c247041a7499691edaad1e..be49f2c1fc731ee4e5ece7841151068abf0f0790 100644 --- a/substrate/bin/node-template/node/src/chain_spec.rs +++ b/templates/solochain/node/src/chain_spec.rs @@ -1,5 +1,5 @@ -use node_template_runtime::{AccountId, RuntimeGenesisConfig, Signature, WASM_BINARY}; use sc_service::ChainType; +use solochain_template_runtime::{AccountId, RuntimeGenesisConfig, Signature, WASM_BINARY}; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_consensus_grandpa::AuthorityId as GrandpaId; use sp_core::{sr25519, Pair, Public}; diff --git a/substrate/bin/node-template/node/src/cli.rs b/templates/solochain/node/src/cli.rs similarity index 100% rename from substrate/bin/node-template/node/src/cli.rs rename to templates/solochain/node/src/cli.rs diff --git a/substrate/bin/node-template/node/src/command.rs b/templates/solochain/node/src/command.rs similarity index 98% rename from substrate/bin/node-template/node/src/command.rs rename to templates/solochain/node/src/command.rs index 3778df6642294684780c347730eb580b69b54d65..42d1477f22f18579d49fb48fdc91df39b10aaa2b 100644 --- a/substrate/bin/node-template/node/src/command.rs +++ b/templates/solochain/node/src/command.rs @@ -5,9 +5,9 @@ use crate::{ service, }; use frame_benchmarking_cli::{BenchmarkCmd, ExtrinsicFactory, SUBSTRATE_REFERENCE_HARDWARE}; -use node_template_runtime::{Block, EXISTENTIAL_DEPOSIT}; use sc_cli::SubstrateCli; use sc_service::PartialComponents; +use solochain_template_runtime::{Block, EXISTENTIAL_DEPOSIT}; use sp_keyring::Sr25519Keyring; impl SubstrateCli for Cli { diff --git a/substrate/bin/node-template/node/src/main.rs b/templates/solochain/node/src/main.rs similarity index 100% rename from substrate/bin/node-template/node/src/main.rs rename to templates/solochain/node/src/main.rs diff --git a/substrate/bin/node-template/node/src/rpc.rs b/templates/solochain/node/src/rpc.rs similarity index 96% rename from substrate/bin/node-template/node/src/rpc.rs rename to templates/solochain/node/src/rpc.rs index 246391adcbbe88a03b6cf9cf9043d82b8de18b60..fe2b6ca72ede5c8d9f8d878db88efa2da5c5ac97 100644 --- a/substrate/bin/node-template/node/src/rpc.rs +++ b/templates/solochain/node/src/rpc.rs @@ -8,8 +8,8 @@ use std::sync::Arc; use jsonrpsee::RpcModule; -use node_template_runtime::{opaque::Block, AccountId, Balance, Nonce}; use sc_transaction_pool_api::TransactionPool; +use solochain_template_runtime::{opaque::Block, AccountId, Balance, Nonce}; use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder; use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; diff --git a/substrate/bin/node-template/node/src/service.rs b/templates/solochain/node/src/service.rs similarity index 94% rename from substrate/bin/node-template/node/src/service.rs rename to templates/solochain/node/src/service.rs index 25cd651178411c6338a2b0d1e7836804a3a5b676..dc25f7579129fe3ea86d0686732be5e6baec40ba 100644 --- a/substrate/bin/node-template/node/src/service.rs +++ b/templates/solochain/node/src/service.rs @@ -1,13 +1,13 @@ //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. 
use futures::FutureExt; -use node_template_runtime::{self, opaque::Block, RuntimeApi}; use sc_client_api::{Backend, BlockBackend}; use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams}; use sc_consensus_grandpa::SharedVoterState; use sc_service::{error::Error as ServiceError, Configuration, TaskManager, WarpSyncParams}; use sc_telemetry::{Telemetry, TelemetryWorker}; use sc_transaction_pool_api::OffchainTransactionPoolFactory; +use solochain_template_runtime::{self, opaque::Block, RuntimeApi}; use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; use std::{sync::Arc, time::Duration}; @@ -80,23 +80,29 @@ pub fn new_partial(config: &Configuration) -> Result { telemetry.as_ref().map(|x| x.handle()), )?; - let slot_duration = sc_consensus_aura::slot_duration(&*client)?; - + let cidp_client = client.clone(); let import_queue = sc_consensus_aura::import_queue::(ImportQueueParams { block_import: grandpa_block_import.clone(), justification_import: Some(Box::new(grandpa_block_import.clone())), client: client.clone(), - create_inherent_data_providers: move |_, ()| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + create_inherent_data_providers: move |parent_hash, _| { + let cidp_client = cidp_client.clone(); + async move { + let slot_duration = sc_consensus_aura::standalone::slot_duration_at( + &*cidp_client, + parent_hash, + )?; + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - let slot = - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - slot_duration, - ); + let slot = + sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( + *timestamp, + slot_duration, + ); - Ok((slot, timestamp)) + Ok((slot, timestamp)) + } }, spawner: &task_manager.spawn_essential_handle(), registry: config.prometheus_registry(), diff --git a/substrate/bin/node-template/pallets/template/Cargo.toml b/templates/solochain/pallets/template/Cargo.toml similarity index 55% rename from substrate/bin/node-template/pallets/template/Cargo.toml rename to templates/solochain/pallets/template/Cargo.toml index 51410a71c7bcee0267f36bbfcf20c616a5537ce3..bd2347151989df8950dc4dd96f039c74241d28a5 100644 --- a/substrate/bin/node-template/pallets/template/Cargo.toml +++ b/templates/solochain/pallets/template/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "pallet-template" -version = "4.0.0-dev" description = "FRAME pallet template for defining custom runtime logic." 
-authors = ["Substrate DevHub "] -homepage = "https://substrate.io" -edition.workspace = true +version = "0.0.0" license = "MIT-0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +edition.workspace = true publish = false -repository = "https://github.com/substrate-developer-hub/substrate-node-template/" [lints] workspace = true @@ -19,16 +19,19 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../../../../frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../../frame/support", default-features = false } -frame-system = { path = "../../../../frame/system", default-features = false } -sp-std = { path = "../../../../primitives/std", default-features = false } +scale-info = { version = "2.10.0", default-features = false, features = [ + "derive", +] } + +# frame deps +frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-support = { path = "../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../substrate/frame/system", default-features = false } [dev-dependencies] -sp-core = { path = "../../../../primitives/core" } -sp-io = { path = "../../../../primitives/io" } -sp-runtime = { path = "../../../../primitives/runtime" } +sp-core = { path = "../../../../substrate/primitives/core" } +sp-io = { path = "../../../../substrate/primitives/io" } +sp-runtime = { path = "../../../../substrate/primitives/runtime" } [features] default = ["std"] @@ -41,7 +44,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/templates/solochain/pallets/template/README.md b/templates/solochain/pallets/template/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9e4dc55267d69c47fff971cb0427bcb2e0ff871c --- /dev/null +++ b/templates/solochain/pallets/template/README.md @@ -0,0 +1 @@ +License: MIT-0 diff --git a/templates/solochain/pallets/template/src/benchmarking.rs b/templates/solochain/pallets/template/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..5a262417629c579c6ecf5ada30ae803217623766 --- /dev/null +++ b/templates/solochain/pallets/template/src/benchmarking.rs @@ -0,0 +1,35 @@ +//! 
Benchmarking setup for pallet-template +#![cfg(feature = "runtime-benchmarks")] +use super::*; + +#[allow(unused)] +use crate::Pallet as Template; +use frame_benchmarking::v2::*; +use frame_system::RawOrigin; + +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn do_something() { + let value = 100u32.into(); + let caller: T::AccountId = whitelisted_caller(); + #[extrinsic_call] + do_something(RawOrigin::Signed(caller), value); + + assert_eq!(Something::::get(), Some(value)); + } + + #[benchmark] + fn cause_error() { + Something::::put(100u32); + let caller: T::AccountId = whitelisted_caller(); + #[extrinsic_call] + cause_error(RawOrigin::Signed(caller)); + + assert_eq!(Something::::get(), Some(101u32)); + } + + impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/substrate/bin/node-template/pallets/template/src/lib.rs b/templates/solochain/pallets/template/src/lib.rs similarity index 100% rename from substrate/bin/node-template/pallets/template/src/lib.rs rename to templates/solochain/pallets/template/src/lib.rs diff --git a/substrate/bin/node-template/pallets/template/src/mock.rs b/templates/solochain/pallets/template/src/mock.rs similarity index 100% rename from substrate/bin/node-template/pallets/template/src/mock.rs rename to templates/solochain/pallets/template/src/mock.rs diff --git a/substrate/bin/node-template/pallets/template/src/tests.rs b/templates/solochain/pallets/template/src/tests.rs similarity index 100% rename from substrate/bin/node-template/pallets/template/src/tests.rs rename to templates/solochain/pallets/template/src/tests.rs diff --git a/templates/solochain/pallets/template/src/weights.rs b/templates/solochain/pallets/template/src/weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..7c42936e09f292de831d28460a3bc39436c3323f --- /dev/null +++ b/templates/solochain/pallets/template/src/weights.rs @@ -0,0 +1,90 @@ + +//! Autogenerated weights for pallet_template +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `Alexs-MacBook-Pro-2.local`, CPU: `` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 + +// Executed Command: +// ../../target/release/node-template +// benchmark +// pallet +// --chain +// dev +// --pallet +// pallet_template +// --extrinsic +// * +// --steps=50 +// --repeat=20 +// --wasm-execution=compiled +// --output +// pallets/template/src/weights.rs +// --template +// ../../.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for pallet_template. +pub trait WeightInfo { + fn do_something() -> Weight; + fn cause_error() -> Weight; +} + +/// Weights for pallet_template using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: TemplateModule Something (r:0 w:1) + /// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + fn do_something() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_000_000 picoseconds. 
+ Weight::from_parts(9_000_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: TemplateModule Something (r:1 w:1) + /// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + fn cause_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `32` + // Estimated: `1489` + // Minimum execution time: 6_000_000 picoseconds. + Weight::from_parts(6_000_000, 1489) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + /// Storage: TemplateModule Something (r:0 w:1) + /// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + fn do_something() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_000_000 picoseconds. + Weight::from_parts(9_000_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: TemplateModule Something (r:1 w:1) + /// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + fn cause_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `32` + // Estimated: `1489` + // Minimum execution time: 6_000_000 picoseconds. + Weight::from_parts(6_000_000, 1489) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } +} diff --git a/templates/solochain/runtime/Cargo.toml b/templates/solochain/runtime/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..4f22e7ff6a3b3500a5e788e93dc8c653f13443d9 --- /dev/null +++ b/templates/solochain/runtime/Cargo.toml @@ -0,0 +1,152 @@ +[package] +name = "solochain-template-runtime" +description = "A solochain runtime template built with Substrate, part of Polkadot Sdk." 
+version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +edition.workspace = true +publish = false + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ + "derive", +] } +scale-info = { version = "2.10.0", default-features = false, features = [ + "derive", + "serde", +] } + +# frame +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } +frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true } +frame-executive = { path = "../../../substrate/frame/executive", default-features = false } + +# frame pallets +pallet-aura = { path = "../../../substrate/frame/aura", default-features = false } +pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } +pallet-grandpa = { path = "../../../substrate/frame/grandpa", default-features = false } +pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } +pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } + +# primitives +sp-api = { path = "../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false } +sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false, features = [ + "serde", +] } +sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa", default-features = false, features = [ + "serde", +] } +sp-core = { path = "../../../substrate/primitives/core", default-features = false, features = [ + "serde", +] } +sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } +sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false, features = [ + "serde", +] } +sp-session = { path = "../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-storage = { path = "../../../substrate/primitives/storage", default-features = false } +sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../substrate/primitives/version", default-features = false, features = [ + "serde", +] } +sp-genesis-builder = { default-features = false, path = "../../../substrate/primitives/genesis-builder" } + +# RPC related +frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } + +# Used for runtime benchmarking +frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true } + +# The pallet in this 
template. +pallet-template = { path = "../pallets/template", default-features = false } + +[build-dependencies] +substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } + +[features] +default = ["std"] +std = [ + "codec/std", + "scale-info/std", + + "frame-executive/std", + "frame-support/std", + "frame-system-benchmarking?/std", + "frame-system-rpc-runtime-api/std", + "frame-system/std", + + "frame-benchmarking?/std", + "frame-try-runtime?/std", + + "pallet-aura/std", + "pallet-balances/std", + "pallet-grandpa/std", + "pallet-sudo/std", + "pallet-template/std", + "pallet-timestamp/std", + "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-transaction-payment/std", + + "sp-api/std", + "sp-block-builder/std", + "sp-consensus-aura/std", + "sp-consensus-grandpa/std", + "sp-core/std", + "sp-genesis-builder/std", + "sp-inherents/std", + "sp-offchain/std", + "sp-runtime/std", + "sp-session/std", + "sp-std/std", + "sp-storage/std", + "sp-transaction-pool/std", + "sp-version/std", + + "substrate-wasm-builder", +] + +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system-benchmarking/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-grandpa/runtime-benchmarks", + "pallet-sudo/runtime-benchmarks", + "pallet-template/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] + +try-runtime = [ + "frame-executive/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + "frame-try-runtime/try-runtime", + "pallet-aura/try-runtime", + "pallet-balances/try-runtime", + "pallet-grandpa/try-runtime", + "pallet-sudo/try-runtime", + "pallet-template/try-runtime", + "pallet-timestamp/try-runtime", + "pallet-transaction-payment/try-runtime", + "sp-runtime/try-runtime", +] + +experimental = ["pallet-aura/experimental"] diff --git a/substrate/bin/node-template/runtime/build.rs b/templates/solochain/runtime/build.rs similarity index 100% rename from substrate/bin/node-template/runtime/build.rs rename to templates/solochain/runtime/build.rs diff --git a/substrate/bin/node-template/runtime/src/lib.rs b/templates/solochain/runtime/src/lib.rs similarity index 96% rename from substrate/bin/node-template/runtime/src/lib.rs rename to templates/solochain/runtime/src/lib.rs index 3b6a74be2512c6214b2fadada68bb22d746f07a9..409b639f021e37b805eb114a4501498e12adad2d 100644 --- a/substrate/bin/node-template/runtime/src/lib.rs +++ b/templates/solochain/runtime/src/lib.rs @@ -1,8 +1,5 @@ #![cfg_attr(not(feature = "std"), no_std)] -// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. -#![recursion_limit = "256"] -// Make the WASM binary available. #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); @@ -22,7 +19,6 @@ use sp_version::NativeVersion; use sp_version::RuntimeVersion; use frame_support::genesis_builder_helper::{build_config, create_default_config}; -// A few exports that help ease life for downstream crates. 
pub use frame_support::{ construct_runtime, derive_impl, parameter_types, traits::{ @@ -95,8 +91,8 @@ pub mod opaque { // https://docs.substrate.io/main-docs/build/upgrade#runtime-versioning #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("node-template"), - impl_name: create_runtime_str!("node-template"), + spec_name: create_runtime_str!("solochain-template-runtime"), + impl_name: create_runtime_str!("solochain-template-runtime"), authoring_version: 1, // The version of the runtime specification. A full node will not attempt to use its native // runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`, @@ -241,6 +237,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = IdentityFee; type LengthToFee = IdentityFee; type FeeMultiplierUpdate = ConstFeeMultiplier; + type WeightInfo = pallet_transaction_payment::weights::SubstrateWeight; } impl pallet_sudo::Config for Runtime { @@ -265,6 +262,7 @@ construct_runtime!( Balances: pallet_balances, TransactionPayment: pallet_transaction_payment, Sudo: pallet_sudo, + // Include the custom logic from the pallet-template in the runtime. TemplateModule: pallet_template, } @@ -276,8 +274,8 @@ pub type Address = sp_runtime::MultiAddress; pub type Header = generic::Header; /// Block type as expected by this runtime. pub type Block = generic::Block; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -296,9 +294,9 @@ type Migrations = (); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; +pub type SignedPayload = generic::SignedPayload; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, @@ -314,6 +312,7 @@ mod benches { frame_benchmarking::define_benchmarks!( [frame_benchmarking, BaselineBench::] [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_balances, Balances] [pallet_timestamp, Timestamp] [pallet_sudo, Sudo] @@ -331,7 +330,7 @@ impl_runtime_apis! { Executive::execute_block(block); } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } @@ -498,6 +497,7 @@ impl_runtime_apis! { use frame_benchmarking::{baseline, Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use baseline::Pallet as BaselineBench; let mut list = Vec::::new(); @@ -514,6 +514,7 @@ impl_runtime_apis! { use frame_benchmarking::{baseline, Benchmarking, BenchmarkBatch}; use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use baseline::Pallet as BaselineBench; impl frame_system_benchmarking::Config for Runtime {}
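Aside (not part of the patch): the runtime hunks above rename `SignedExtra` to `TxExtension` but keep the same shape, a tuple of per-extrinsic checks that is applied as one unit when an unchecked extrinsic is validated. The sketch below mirrors only that composition idea with toy traits and checks; none of the names correspond to the real `frame_system` or `pallet_transaction_payment` extension types, which carry much richer semantics (pre/post dispatch hooks, weights, signed payload encoding).

```rust
// A toy "transaction extension" interface: each check inspects the sender and
// either passes or rejects the transaction.
trait TxCheck {
    fn validate(&self, who: &str) -> Result<(), String>;
}

struct CheckNonZeroSender;
impl TxCheck for CheckNonZeroSender {
    fn validate(&self, who: &str) -> Result<(), String> {
        if who.is_empty() { Err("zero sender".into()) } else { Ok(()) }
    }
}

struct CheckNonce(u32);
impl TxCheck for CheckNonce {
    fn validate(&self, _who: &str) -> Result<(), String> {
        if self.0 == u32::MAX { Err("nonce overflow".into()) } else { Ok(()) }
    }
}

// Tuples compose checks in order, which is the essence of declaring the
// runtime's extension set as one tuple type alias and threading it through
// `UncheckedExtrinsic` and `SignedPayload`.
impl<A: TxCheck, B: TxCheck> TxCheck for (A, B) {
    fn validate(&self, who: &str) -> Result<(), String> {
        self.0.validate(who)?;
        self.1.validate(who)
    }
}

fn main() {
    let tx_ext = (CheckNonZeroSender, CheckNonce(0));
    assert!(tx_ext.validate("alice").is_ok());
    assert!(tx_ext.validate("").is_err());
}
```

Because the whole tuple is a single type, renaming the alias (as the patch does) touches only the type-level plumbing; the individual checks and their order are unchanged.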